Remove dedicated SSL network write buffer (elastic#41283)
This is related to elastic#27260. Currently the SSLDriver allocates a
dedicated network write buffer and encrypts data into that buffer one
buffer at a time, which requires constantly switching between
encrypting and flushing. This commit adds a dedicated outbound buffer
for SSL operations that internally allocates new packet-sized buffers
as they are needed (for writing encrypted data). This allows us to
fully encrypt an operation before writing it to the network.
Eventually it can be hooked up to buffer recycling.
Tim-Brooks authored and Gurkan Kaymak committed May 27, 2019
1 parent 6e32d95 commit 08a97c7
Showing 22 changed files with 480 additions and 329 deletions.
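As a rough illustration of the approach described in the commit message, the sketch below shows an outbound buffer that hands out packet-sized pages on demand, so that an entire operation can be encrypted before any of it is flushed to the network. This is a hypothetical sketch only; the class name, the PACKET_SIZE value, and the methods are illustrative and are not the actual buffer added by this commit.

import java.nio.ByteBuffer;
import java.util.ArrayDeque;

// Hypothetical sketch: an outbound buffer that allocates packet-sized pages as needed,
// so a whole operation can be encrypted before anything is written to the network.
public final class PacketSizedOutboundBuffer {

    private static final int PACKET_SIZE = 1 << 14; // assumed page size, roughly one TLS record

    private final ArrayDeque<ByteBuffer> pages = new ArrayDeque<>();

    // Returns a buffer with at least bytesNeeded writable bytes, allocating a new page if required.
    public ByteBuffer nextWriteBuffer(int bytesNeeded) {
        ByteBuffer current = pages.peekLast();
        if (current == null || current.remaining() < bytesNeeded) {
            current = ByteBuffer.allocate(Math.max(PACKET_SIZE, bytesNeeded));
            pages.addLast(current);
        }
        return current;
    }

    // Returns independent, flipped views of all pages so the fully encrypted
    // operation can be flushed to the network in one pass.
    public ByteBuffer[] buffersToFlush() {
        ByteBuffer[] result = new ByteBuffer[pages.size()];
        int i = 0;
        for (ByteBuffer page : pages) {
            ByteBuffer view = page.duplicate();
            view.flip();
            result[i++] = view;
        }
        return result;
    }
}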
31 changes: 26 additions & 5 deletions libs/nio/src/main/java/org/elasticsearch/nio/FlushOperation.java
@@ -25,6 +25,8 @@

public class FlushOperation {

private static final ByteBuffer[] EMPTY_ARRAY = new ByteBuffer[0];

private final BiConsumer<Void, Exception> listener;
private final ByteBuffer[] buffers;
private final int[] offsets;
@@ -61,19 +63,38 @@ public void incrementIndex(int delta) {
}

public ByteBuffer[] getBuffersToWrite() {
return getBuffersToWrite(length);
}

public ByteBuffer[] getBuffersToWrite(int maxBytes) {
final int index = Arrays.binarySearch(offsets, internalIndex);
int offsetIndex = index < 0 ? (-(index + 1)) - 1 : index;
final int offsetIndex = index < 0 ? (-(index + 1)) - 1 : index;
final int finalIndex = Arrays.binarySearch(offsets, Math.min(internalIndex + maxBytes, length));
final int finalOffsetIndex = finalIndex < 0 ? (-(finalIndex + 1)) - 1 : finalIndex;

ByteBuffer[] postIndexBuffers = new ByteBuffer[buffers.length - offsetIndex];
int nBuffers = (finalOffsetIndex - offsetIndex) + 1;

int firstBufferPosition = internalIndex - offsets[offsetIndex];
ByteBuffer firstBuffer = buffers[offsetIndex].duplicate();
firstBuffer.position(internalIndex - offsets[offsetIndex]);
firstBuffer.position(firstBufferPosition);
if (nBuffers == 1 && firstBuffer.remaining() == 0) {
return EMPTY_ARRAY;
}

ByteBuffer[] postIndexBuffers = new ByteBuffer[nBuffers];
postIndexBuffers[0] = firstBuffer;
int finalOffset = offsetIndex + nBuffers;
int nBytes = firstBuffer.remaining();
int j = 1;
for (int i = (offsetIndex + 1); i < buffers.length; ++i) {
postIndexBuffers[j++] = buffers[i].duplicate();
for (int i = (offsetIndex + 1); i < finalOffset; ++i) {
ByteBuffer buffer = buffers[i].duplicate();
nBytes += buffer.remaining();
postIndexBuffers[j++] = buffer;
}

int excessBytes = Math.max(0, nBytes - maxBytes);
ByteBuffer lastBuffer = postIndexBuffers[postIndexBuffers.length - 1];
lastBuffer.limit(lastBuffer.limit() - excessBytes);
return postIndexBuffers;
}
}
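The new getBuffersToWrite(int maxBytes) above relies on Arrays.binarySearch over the cumulative offsets array and on converting a negative insertion-point result back to the index of the containing buffer. The standalone snippet below walks through that conversion with made-up offset and index values.

import java.util.Arrays;

// Illustration of the offset lookup used in getBuffersToWrite; offsets[i] is assumed to be
// the cumulative start position of buffer i, and the values here are invented for the example.
public final class OffsetLookupExample {
    public static void main(String[] args) {
        int[] offsets = {0, 5, 20};     // e.g. three buffers starting at bytes 0, 5 and 20
        int internalIndex = 7;          // next byte to flush lies inside the second buffer

        int index = Arrays.binarySearch(offsets, internalIndex);
        // On a miss, binarySearch returns (-(insertionPoint) - 1), so the buffer containing
        // internalIndex is the element just before the insertion point.
        int offsetIndex = index < 0 ? (-(index + 1)) - 1 : index;

        System.out.println(offsetIndex); // prints 1
    }
}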
@@ -27,7 +27,7 @@ public class FlushReadyWrite extends FlushOperation implements WriteOperation {
private final SocketChannelContext channelContext;
private final ByteBuffer[] buffers;

FlushReadyWrite(SocketChannelContext channelContext, ByteBuffer[] buffers, BiConsumer<Void, Exception> listener) {
public FlushReadyWrite(SocketChannelContext channelContext, ByteBuffer[] buffers, BiConsumer<Void, Exception> listener) {
super(buffers, listener);
this.channelContext = channelContext;
this.buffers = buffers;
@@ -19,7 +19,6 @@

package org.elasticsearch.nio;

import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import org.elasticsearch.nio.utils.ExceptionsHelper;

import java.nio.ByteBuffer;
@@ -140,11 +139,11 @@ public ByteBuffer[] sliceBuffersTo(long to) {

ByteBuffer[] buffers = new ByteBuffer[pageCount];
Iterator<Page> pageIterator = pages.iterator();
ByteBuffer firstBuffer = pageIterator.next().byteBuffer.duplicate();
ByteBuffer firstBuffer = pageIterator.next().byteBuffer().duplicate();
firstBuffer.position(firstBuffer.position() + offset);
buffers[0] = firstBuffer;
for (int i = 1; i < buffers.length; i++) {
buffers[i] = pageIterator.next().byteBuffer.duplicate();
buffers[i] = pageIterator.next().byteBuffer().duplicate();
}
if (finalLimit != 0) {
buffers[buffers.length - 1].limit(finalLimit);
@@ -180,14 +179,14 @@ public Page[] sliceAndRetainPagesTo(long to) {
Page[] pages = new Page[pageCount];
Iterator<Page> pageIterator = this.pages.iterator();
Page firstPage = pageIterator.next().duplicate();
ByteBuffer firstBuffer = firstPage.byteBuffer;
ByteBuffer firstBuffer = firstPage.byteBuffer();
firstBuffer.position(firstBuffer.position() + offset);
pages[0] = firstPage;
for (int i = 1; i < pages.length; i++) {
pages[i] = pageIterator.next().duplicate();
}
if (finalLimit != 0) {
pages[pages.length - 1].byteBuffer.limit(finalLimit);
pages[pages.length - 1].byteBuffer().limit(finalLimit);
}

return pages;
@@ -217,9 +216,9 @@ public ByteBuffer[] sliceBuffersFrom(long from) {
ByteBuffer[] buffers = new ByteBuffer[pages.size() - pageIndex];
Iterator<Page> pageIterator = pages.descendingIterator();
for (int i = buffers.length - 1; i > 0; --i) {
buffers[i] = pageIterator.next().byteBuffer.duplicate();
buffers[i] = pageIterator.next().byteBuffer().duplicate();
}
ByteBuffer firstPostIndexBuffer = pageIterator.next().byteBuffer.duplicate();
ByteBuffer firstPostIndexBuffer = pageIterator.next().byteBuffer().duplicate();
firstPostIndexBuffer.position(firstPostIndexBuffer.position() + indexInPage);
buffers[0] = firstPostIndexBuffer;

@@ -268,53 +267,4 @@ private int pageIndex(long index) {
private int indexInPage(long index) {
return (int) (index & PAGE_MASK);
}

public static class Page implements AutoCloseable {

private final ByteBuffer byteBuffer;
// This is reference counted as some implementations want to retain the byte pages by calling
// sliceAndRetainPagesTo. With reference counting we can increment the reference count, return the
// pages, and safely close them when this channel buffer is done with them. The reference count
// would be 1 at that point, meaning that the pages will remain until the implementation closes
// theirs.
private final RefCountedCloseable refCountedCloseable;

public Page(ByteBuffer byteBuffer, Runnable closeable) {
this(byteBuffer, new RefCountedCloseable(closeable));
}

private Page(ByteBuffer byteBuffer, RefCountedCloseable refCountedCloseable) {
this.byteBuffer = byteBuffer;
this.refCountedCloseable = refCountedCloseable;
}

private Page duplicate() {
refCountedCloseable.incRef();
return new Page(byteBuffer.duplicate(), refCountedCloseable);
}

public ByteBuffer getByteBuffer() {
return byteBuffer;
}

@Override
public void close() {
refCountedCloseable.decRef();
}

private static class RefCountedCloseable extends AbstractRefCounted {

private final Runnable closeable;

private RefCountedCloseable(Runnable closeable) {
super("byte array page");
this.closeable = closeable;
}

@Override
protected void closeInternal() {
closeable.run();
}
}
}
}
89 changes: 89 additions & 0 deletions libs/nio/src/main/java/org/elasticsearch/nio/Page.java
@@ -0,0 +1,89 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.nio;

import org.elasticsearch.common.util.concurrent.AbstractRefCounted;

import java.io.Closeable;
import java.nio.ByteBuffer;

public class Page implements Closeable {

private final ByteBuffer byteBuffer;
// This is reference counted as some implementations want to retain the byte pages by calling
// duplicate. With reference counting we can increment the reference count, return a new page,
// and safely close the pages independently. The closeable will not be called until each page is
// released.
private final RefCountedCloseable refCountedCloseable;

public Page(ByteBuffer byteBuffer) {
this(byteBuffer, () -> {});
}

public Page(ByteBuffer byteBuffer, Runnable closeable) {
this(byteBuffer, new RefCountedCloseable(closeable));
}

private Page(ByteBuffer byteBuffer, RefCountedCloseable refCountedCloseable) {
this.byteBuffer = byteBuffer;
this.refCountedCloseable = refCountedCloseable;
}

/**
* Duplicates this page and increments the reference count. The new page must be closed independently
* of the original page.
*
* @return the new page
*/
public Page duplicate() {
refCountedCloseable.incRef();
return new Page(byteBuffer.duplicate(), refCountedCloseable);
}

/**
* Returns the {@link ByteBuffer} for this page. Modifications to the limits, positions, etc of the
* buffer will also mutate this page. Call {@link ByteBuffer#duplicate()} to avoid mutating the page.
*
* @return the byte buffer
*/
public ByteBuffer byteBuffer() {
return byteBuffer;
}

@Override
public void close() {
refCountedCloseable.decRef();
}

private static class RefCountedCloseable extends AbstractRefCounted {

private final Runnable closeable;

private RefCountedCloseable(Runnable closeable) {
super("byte array page");
this.closeable = closeable;
}

@Override
protected void closeInternal() {
closeable.run();
}
}
}
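A short usage sketch for the reference-counted Page above, assuming it is compiled alongside the class as shown: the close Runnable runs only after the original page and every duplicate have been closed. The AtomicBoolean flag exists only for this example.

import org.elasticsearch.nio.Page;

import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicBoolean;

// Example-only demonstration of Page reference counting.
public final class PageUsageExample {
    public static void main(String[] args) {
        AtomicBoolean released = new AtomicBoolean(false);
        Page original = new Page(ByteBuffer.allocate(1 << 14), () -> released.set(true));

        Page retained = original.duplicate();   // increments the reference count
        original.close();                       // closeable has not run yet
        System.out.println(released.get());     // false

        retained.close();                       // last reference released, closeable runs
        System.out.println(released.get());     // true
    }
}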
@@ -325,7 +325,7 @@ protected int flushToChannel(FlushOperation flushOperation) throws IOException {
ioBuffer.clear();
ioBuffer.limit(Math.min(WRITE_LIMIT, ioBuffer.limit()));
int j = 0;
ByteBuffer[] buffers = flushOperation.getBuffersToWrite();
ByteBuffer[] buffers = flushOperation.getBuffersToWrite(WRITE_LIMIT);
while (j < buffers.length && ioBuffer.remaining() > 0) {
ByteBuffer buffer = buffers[j++];
copyBytes(buffer, ioBuffer);
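The flushToChannel hunk above now gathers at most WRITE_LIMIT bytes from the operation's buffers into a single reusable IO buffer. The sketch below shows the same gather-copy pattern with a plain stand-in for the copyBytes helper, which is not visible in this diff; the WRITE_LIMIT value is assumed for the example.

import java.nio.ByteBuffer;

// Gather at most WRITE_LIMIT bytes from a set of source buffers into one IO buffer.
// This mirrors the loop in flushToChannel; copyBytes is a stand-in for the real helper.
public final class GatherCopyExample {

    private static final int WRITE_LIMIT = 1 << 16; // assumed value for illustration

    static void fillIoBuffer(ByteBuffer[] buffers, ByteBuffer ioBuffer) {
        ioBuffer.clear();
        ioBuffer.limit(Math.min(WRITE_LIMIT, ioBuffer.limit()));
        int j = 0;
        while (j < buffers.length && ioBuffer.remaining() > 0) {
            copyBytes(buffers[j++], ioBuffer);
        }
        ioBuffer.flip(); // ready the IO buffer for the channel write
    }

    // Copies as many bytes as fit without overflowing the destination buffer.
    private static void copyBytes(ByteBuffer from, ByteBuffer to) {
        int toCopy = Math.min(from.remaining(), to.remaining());
        ByteBuffer slice = from.duplicate();
        slice.limit(slice.position() + toCopy);
        to.put(slice);
        from.position(from.position() + toCopy);
    }
}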
@@ -31,6 +31,7 @@
import java.util.function.Consumer;

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
@@ -168,7 +169,7 @@ public void testQueuedWriteIsFlushedInFlushCall() throws Exception {

assertTrue(context.readyForFlush());

when(flushOperation.getBuffersToWrite()).thenReturn(buffers);
when(flushOperation.getBuffersToWrite(anyInt())).thenReturn(buffers);
when(flushOperation.isFullyFlushed()).thenReturn(false, true);
when(flushOperation.getListener()).thenReturn(listener);
context.flushChannel();
@@ -187,7 +188,7 @@ public void testPartialFlush() throws IOException {
assertTrue(context.readyForFlush());

when(flushOperation.isFullyFlushed()).thenReturn(false);
when(flushOperation.getBuffersToWrite()).thenReturn(new ByteBuffer[] {ByteBuffer.allocate(3)});
when(flushOperation.getBuffersToWrite(anyInt())).thenReturn(new ByteBuffer[] {ByteBuffer.allocate(3)});
context.flushChannel();

verify(listener, times(0)).accept(null, null);
@@ -201,8 +202,8 @@ public void testMultipleWritesPartialFlushes() throws IOException {
BiConsumer<Void, Exception> listener2 = mock(BiConsumer.class);
FlushReadyWrite flushOperation1 = mock(FlushReadyWrite.class);
FlushReadyWrite flushOperation2 = mock(FlushReadyWrite.class);
when(flushOperation1.getBuffersToWrite()).thenReturn(new ByteBuffer[] {ByteBuffer.allocate(3)});
when(flushOperation2.getBuffersToWrite()).thenReturn(new ByteBuffer[] {ByteBuffer.allocate(3)});
when(flushOperation1.getBuffersToWrite(anyInt())).thenReturn(new ByteBuffer[] {ByteBuffer.allocate(3)});
when(flushOperation2.getBuffersToWrite(anyInt())).thenReturn(new ByteBuffer[] {ByteBuffer.allocate(3)});
when(flushOperation1.getListener()).thenReturn(listener);
when(flushOperation2.getListener()).thenReturn(listener2);

@@ -237,7 +238,7 @@ public void testWhenIOExceptionThrownListenerIsCalled() throws IOException {
assertTrue(context.readyForFlush());

IOException exception = new IOException();
when(flushOperation.getBuffersToWrite()).thenReturn(buffers);
when(flushOperation.getBuffersToWrite(anyInt())).thenReturn(buffers);
when(rawChannel.write(any(ByteBuffer.class))).thenThrow(exception);
when(flushOperation.getListener()).thenReturn(listener);
expectThrows(IOException.class, () -> context.flushChannel());
@@ -252,7 +253,7 @@ public void testWriteIOExceptionMeansChannelReadyToClose() throws IOException {
context.queueWriteOperation(flushOperation);

IOException exception = new IOException();
when(flushOperation.getBuffersToWrite()).thenReturn(buffers);
when(flushOperation.getBuffersToWrite(anyInt())).thenReturn(buffers);
when(rawChannel.write(any(ByteBuffer.class))).thenThrow(exception);

assertFalse(context.selectorShouldClose());
@@ -65,29 +65,45 @@ public void testMultipleFlushesWithCompositeBuffer() throws IOException {
ByteBuffer[] byteBuffers = writeOp.getBuffersToWrite();
assertEquals(3, byteBuffers.length);
assertEquals(5, byteBuffers[0].remaining());
ByteBuffer[] byteBuffersWithLimit = writeOp.getBuffersToWrite(10);
assertEquals(2, byteBuffersWithLimit.length);
assertEquals(5, byteBuffersWithLimit[0].remaining());
assertEquals(5, byteBuffersWithLimit[1].remaining());

writeOp.incrementIndex(5);
assertFalse(writeOp.isFullyFlushed());
byteBuffers = writeOp.getBuffersToWrite();
assertEquals(2, byteBuffers.length);
assertEquals(15, byteBuffers[0].remaining());
assertEquals(3, byteBuffers[1].remaining());
byteBuffersWithLimit = writeOp.getBuffersToWrite(10);
assertEquals(1, byteBuffersWithLimit.length);
assertEquals(10, byteBuffersWithLimit[0].remaining());

writeOp.incrementIndex(2);
assertFalse(writeOp.isFullyFlushed());
byteBuffers = writeOp.getBuffersToWrite();
assertEquals(2, byteBuffers.length);
assertEquals(13, byteBuffers[0].remaining());
assertEquals(3, byteBuffers[1].remaining());
byteBuffersWithLimit = writeOp.getBuffersToWrite(10);
assertEquals(1, byteBuffersWithLimit.length);
assertEquals(10, byteBuffersWithLimit[0].remaining());

writeOp.incrementIndex(15);
assertFalse(writeOp.isFullyFlushed());
byteBuffers = writeOp.getBuffersToWrite();
assertEquals(1, byteBuffers.length);
assertEquals(1, byteBuffers[0].remaining());
byteBuffersWithLimit = writeOp.getBuffersToWrite(10);
assertEquals(1, byteBuffersWithLimit.length);
assertEquals(1, byteBuffersWithLimit[0].remaining());

writeOp.incrementIndex(1);
assertTrue(writeOp.isFullyFlushed());
byteBuffers = writeOp.getBuffersToWrite();
assertEquals(1, byteBuffers.length);
assertEquals(0, byteBuffers[0].remaining());
assertEquals(0, byteBuffers.length);
byteBuffersWithLimit = writeOp.getBuffersToWrite(10);
assertEquals(0, byteBuffersWithLimit.length);
}
}