Skip to content

Commit

Permalink
chore: Simplify CometShuffleMemoryAllocator to use Spark unified memo…
Browse files Browse the repository at this point in the history
…ry allocator (#1063)
  • Loading branch information
viirya authored Nov 14, 2024
1 parent 9657b75 commit c32bf0c
Show file tree
Hide file tree
Showing 10 changed files with 336 additions and 148 deletions.
19 changes: 17 additions & 2 deletions common/src/main/scala/org/apache/comet/CometConf.scala
Original file line number Diff line number Diff line change
Expand Up @@ -322,8 +322,10 @@ object CometConf extends ShimCometConf {

val COMET_COLUMNAR_SHUFFLE_MEMORY_SIZE: OptionalConfigEntry[Long] =
conf("spark.comet.columnar.shuffle.memorySize")
.internal()
.doc(
"The optional maximum size of the memory used for Comet columnar shuffle, in MiB. " +
"Test-only config. This is only used to test Comet shuffle with Spark tests. " +
"The optional maximum size of the memory used for Comet columnar shuffle, in MiB. " +
"Note that this config is only used when `spark.comet.exec.shuffle.mode` is " +
"`jvm`. Once allocated memory size reaches this config, the current batch will be " +
"flushed to disk immediately. If this is not configured, Comet will use " +
Expand All @@ -335,8 +337,10 @@ object CometConf extends ShimCometConf {

val COMET_COLUMNAR_SHUFFLE_MEMORY_FACTOR: ConfigEntry[Double] =
conf("spark.comet.columnar.shuffle.memory.factor")
.internal()
.doc(
"Fraction of Comet memory to be allocated per executor process for Comet shuffle. " +
"Test-only config. This is only used to test Comet shuffle with Spark tests. " +
"Fraction of Comet memory to be allocated per executor process for Comet shuffle. " +
"Comet memory size is specified by `spark.comet.memoryOverhead` or " +
"calculated by `spark.comet.memory.overhead.factor` * `spark.executor.memory`.")
.doubleConf
Expand All @@ -345,6 +349,17 @@ object CometConf extends ShimCometConf {
"Ensure that Comet shuffle memory overhead factor is a double greater than 0")
.createWithDefault(1.0)

val COMET_COLUMNAR_SHUFFLE_UNIFIED_MEMORY_ALLOCATOR_IN_TEST: ConfigEntry[Boolean] =
  conf("spark.comet.columnar.shuffle.unifiedMemoryAllocatorTest")
    // Fixed: trailing space added after "in tests." so the concatenated doc text does not
    // render as "tests.If", and the "test-ony" typo corrected to "test-only".
    .doc("Whether to use Spark unified memory allocator for Comet columnar shuffle in tests. " +
      "If not configured, Comet will use a test-only memory allocator for Comet columnar " +
      "shuffle when Spark test env detected. The test-only allocator is proposed to run with " +
      "Spark tests as these tests require on-heap memory configuration. " +
      "By default, this config is false.")
    .internal()
    .booleanConf
    .createWithDefault(false)

val COMET_COLUMNAR_SHUFFLE_BATCH_SIZE: ConfigEntry[Int] =
conf("spark.comet.columnar.shuffle.batch.size")
.internal()
Expand Down
1 change: 0 additions & 1 deletion docs/source/user-guide/configs.md
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@ Comet provides the following configuration settings.
| spark.comet.columnar.shuffle.async.enabled | Whether to enable asynchronous shuffle for Arrow-based shuffle. | false |
| spark.comet.columnar.shuffle.async.max.thread.num | Maximum number of threads on an executor used for Comet async columnar shuffle. This is the upper bound of total number of shuffle threads per executor. In other words, if the number of cores * the number of shuffle threads per task `spark.comet.columnar.shuffle.async.thread.num` is larger than this config. Comet will use this config as the number of shuffle threads per executor instead. | 100 |
| spark.comet.columnar.shuffle.async.thread.num | Number of threads used for Comet async columnar shuffle per shuffle task. Note that more threads means more memory requirement to buffer shuffle data before flushing to disk. Also, more threads may not always improve performance, and should be set based on the number of cores available. | 3 |
| spark.comet.columnar.shuffle.memory.factor | Fraction of Comet memory to be allocated per executor process for Comet shuffle. Comet memory size is specified by `spark.comet.memoryOverhead` or calculated by `spark.comet.memory.overhead.factor` * `spark.executor.memory`. | 1.0 |
| spark.comet.convert.csv.enabled | When enabled, data from Spark (non-native) CSV v1 and v2 scans will be converted to Arrow format. Note that to enable native vectorized execution, both this config and 'spark.comet.exec.enabled' need to be enabled. | false |
| spark.comet.convert.json.enabled | When enabled, data from Spark (non-native) JSON v1 and v2 scans will be converted to Arrow format. Note that to enable native vectorized execution, both this config and 'spark.comet.exec.enabled' need to be enabled. | false |
| spark.comet.convert.parquet.enabled | When enabled, data from Spark (non-native) Parquet v1 and v2 scans will be converted to Arrow format. Note that to enable native vectorized execution, both this config and 'spark.comet.exec.enabled' need to be enabled. | false |
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,179 +20,87 @@
package org.apache.spark.shuffle.comet;

import java.io.IOException;
import java.util.BitSet;

import org.apache.spark.SparkConf;
import org.apache.spark.memory.MemoryConsumer;
import org.apache.spark.memory.MemoryMode;
import org.apache.spark.memory.SparkOutOfMemoryError;
import org.apache.spark.memory.TaskMemoryManager;
import org.apache.spark.sql.internal.SQLConf;
import org.apache.spark.unsafe.array.LongArray;
import org.apache.spark.unsafe.memory.MemoryBlock;
import org.apache.spark.unsafe.memory.UnsafeMemoryAllocator;
import org.apache.spark.util.Utils;

import org.apache.comet.CometSparkSessionExtensions$;
import org.apache.comet.CometConf$;

/**
* A simple memory allocator used by `CometShuffleExternalSorter` to allocate memory blocks which
* store serialized rows. We don't rely on Spark memory allocator because we need to allocate
* off-heap memory no matter memory mode is on-heap or off-heap. This allocator is configured with
* fixed size of memory, and it will throw `SparkOutOfMemoryError` if the memory is not enough.
*
* <p>Some methods are copied from `org.apache.spark.unsafe.memory.TaskMemoryManager` with
* modifications. Most modifications are to remove the dependency on the configured memory mode.
* store serialized rows. This class is simply an implementation of `MemoryConsumer` that delegates
* memory allocation to the `TaskMemoryManager`. This requires that the `TaskMemoryManager` is
* configured with `MemoryMode.OFF_HEAP`, i.e. it is using off-heap memory.
*/
public final class CometShuffleMemoryAllocator extends MemoryConsumer {
private final UnsafeMemoryAllocator allocator = new UnsafeMemoryAllocator();

private final long pageSize;
private final long totalMemory;
private long allocatedMemory = 0L;

/** The number of bits used to address the page table. */
private static final int PAGE_NUMBER_BITS = 13;

/** The number of entries in the page table. */
private static final int PAGE_TABLE_SIZE = 1 << PAGE_NUMBER_BITS;

private final MemoryBlock[] pageTable = new MemoryBlock[PAGE_TABLE_SIZE];
private final BitSet allocatedPages = new BitSet(PAGE_TABLE_SIZE);
public final class CometShuffleMemoryAllocator extends CometShuffleMemoryAllocatorTrait {
private static CometShuffleMemoryAllocatorTrait INSTANCE;

private static final int OFFSET_BITS = 51;
private static final long MASK_LONG_LOWER_51_BITS = 0x7FFFFFFFFFFFFL;

private static CometShuffleMemoryAllocator INSTANCE;

public static synchronized CometShuffleMemoryAllocator getInstance(
/**
* Returns the singleton instance of `CometShuffleMemoryAllocator`. This method should be used
* instead of the constructor to ensure that only one instance of `CometShuffleMemoryAllocator` is
* created. For Spark tests, this returns `CometTestShuffleMemoryAllocator` which is a test-only
* allocator that should not be used in production.
*/
public static CometShuffleMemoryAllocatorTrait getInstance(
SparkConf conf, TaskMemoryManager taskMemoryManager, long pageSize) {
if (INSTANCE == null) {
INSTANCE = new CometShuffleMemoryAllocator(conf, taskMemoryManager, pageSize);
boolean isSparkTesting = Utils.isTesting();
boolean useUnifiedMemAllocator =
(boolean)
CometConf$.MODULE$.COMET_COLUMNAR_SHUFFLE_UNIFIED_MEMORY_ALLOCATOR_IN_TEST().get();

if (isSparkTesting && !useUnifiedMemAllocator) {
synchronized (CometShuffleMemoryAllocator.class) {
if (INSTANCE == null) {
// CometTestShuffleMemoryAllocator handles pages by itself so it can be a singleton.
INSTANCE = new CometTestShuffleMemoryAllocator(conf, taskMemoryManager, pageSize);
}
}
return INSTANCE;
} else {
if (taskMemoryManager.getTungstenMemoryMode() != MemoryMode.OFF_HEAP) {
throw new IllegalArgumentException(
"CometShuffleMemoryAllocator should be used with off-heap "
+ "memory mode, but got "
+ taskMemoryManager.getTungstenMemoryMode());
}

// CometShuffleMemoryAllocator stores pages in TaskMemoryManager which is not singleton,
// but one instance per task. So we need to create a new instance for each task.
return new CometShuffleMemoryAllocator(taskMemoryManager, pageSize);
}

return INSTANCE;
}

CometShuffleMemoryAllocator(SparkConf conf, TaskMemoryManager taskMemoryManager, long pageSize) {
CometShuffleMemoryAllocator(TaskMemoryManager taskMemoryManager, long pageSize) {
super(taskMemoryManager, pageSize, MemoryMode.OFF_HEAP);
this.pageSize = pageSize;
this.totalMemory =
CometSparkSessionExtensions$.MODULE$.getCometShuffleMemorySize(conf, SQLConf.get());
}

public synchronized long acquireMemory(long size) {
if (allocatedMemory >= totalMemory) {
throw new SparkOutOfMemoryError(
"Unable to acquire "
+ size
+ " bytes of memory, current usage "
+ "is "
+ allocatedMemory
+ " bytes and max memory is "
+ totalMemory
+ " bytes");
}
long allocationSize = Math.min(size, totalMemory - allocatedMemory);
allocatedMemory += allocationSize;
return allocationSize;
}

public long spill(long l, MemoryConsumer memoryConsumer) throws IOException {
// JVM shuffle writer does not support spilling for other memory consumers
return 0;
}

public synchronized LongArray allocateArray(long size) {
long required = size * 8L;
MemoryBlock page = allocate(required);
return new LongArray(page);
}

public synchronized void freeArray(LongArray array) {
if (array == null) {
return;
}
free(array.memoryBlock());
}

public synchronized MemoryBlock allocatePage(long required) {
long size = Math.max(pageSize, required);
return allocate(size);
}

private synchronized MemoryBlock allocate(long required) {
if (required > TaskMemoryManager.MAXIMUM_PAGE_SIZE_BYTES) {
throw new TooLargePageException(required);
}

long got = acquireMemory(required);

if (got < required) {
allocatedMemory -= got;

throw new SparkOutOfMemoryError(
"Unable to acquire "
+ required
+ " bytes of memory, got "
+ got
+ " bytes. Available: "
+ (totalMemory - allocatedMemory));
}

int pageNumber = allocatedPages.nextClearBit(0);
if (pageNumber >= PAGE_TABLE_SIZE) {
allocatedMemory -= got;

throw new IllegalStateException(
"Have already allocated a maximum of " + PAGE_TABLE_SIZE + " pages");
}

MemoryBlock block = allocator.allocate(got);

block.pageNumber = pageNumber;
pageTable[pageNumber] = block;
allocatedPages.set(pageNumber);

return block;
public synchronized MemoryBlock allocate(long required) {
return this.allocatePage(required);
}

public synchronized void free(MemoryBlock block) {
if (block.pageNumber == MemoryBlock.FREED_IN_ALLOCATOR_PAGE_NUMBER) {
// Already freed block
return;
}
allocatedMemory -= block.size();

pageTable[block.pageNumber] = null;
allocatedPages.clear(block.pageNumber);
block.pageNumber = MemoryBlock.FREED_IN_TMM_PAGE_NUMBER;

allocator.free(block);
}

public synchronized long getAvailableMemory() {
return totalMemory - allocatedMemory;
this.freePage(block);
}

/**
* Returns the offset in the page for the given page plus base offset address. Note that this
* method assumes that the page number is valid.
*/
public long getOffsetInPage(long pagePlusOffsetAddress) {
long offsetInPage = decodeOffset(pagePlusOffsetAddress);
int pageNumber = TaskMemoryManager.decodePageNumber(pagePlusOffsetAddress);
assert (pageNumber >= 0 && pageNumber < PAGE_TABLE_SIZE);
MemoryBlock page = pageTable[pageNumber];
assert (page != null);
return page.getBaseOffset() + offsetInPage;
}

public long decodeOffset(long pagePlusOffsetAddress) {
return pagePlusOffsetAddress & MASK_LONG_LOWER_51_BITS;
return taskMemoryManager.getOffsetInPage(pagePlusOffsetAddress);
}

public long encodePageNumberAndOffset(int pageNumber, long offsetInPage) {
assert (pageNumber >= 0);
return ((long) pageNumber) << OFFSET_BITS | offsetInPage & MASK_LONG_LOWER_51_BITS;
return TaskMemoryManager.encodePageNumberAndOffset(pageNumber, offsetInPage);
}

public long encodePageNumberAndOffset(MemoryBlock page, long offsetInPage) {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.spark.shuffle.comet;

import org.apache.spark.memory.MemoryConsumer;
import org.apache.spark.memory.MemoryMode;
import org.apache.spark.memory.TaskMemoryManager;
import org.apache.spark.unsafe.memory.MemoryBlock;

/**
 * The base class for Comet JVM shuffle memory allocators. Extends Spark's {@code MemoryConsumer}
 * so that implementations participate in task-level memory accounting via the supplied
 * {@code TaskMemoryManager}.
 */
public abstract class CometShuffleMemoryAllocatorTrait extends MemoryConsumer {

  /**
   * @param taskMemoryManager the task-level memory manager this consumer registers with
   * @param pageSize the default page size, in bytes, for page allocations
   * @param mode the memory mode (on-heap or off-heap) passed through to {@code MemoryConsumer}
   */
  protected CometShuffleMemoryAllocatorTrait(
      TaskMemoryManager taskMemoryManager, long pageSize, MemoryMode mode) {
    super(taskMemoryManager, pageSize, mode);
  }

  /** Allocates a memory block of at least {@code required} bytes. */
  public abstract MemoryBlock allocate(long required);

  /** Frees a memory block previously returned by {@link #allocate(long)}. */
  public abstract void free(MemoryBlock block);

  /**
   * Returns the offset within the page for the given encoded page-plus-offset address.
   * The exact encoding is implementation-defined — see the concrete allocator.
   */
  public abstract long getOffsetInPage(long pagePlusOffsetAddress);

  /** Encodes the given page and an offset within that page into a single long address. */
  public abstract long encodePageNumberAndOffset(MemoryBlock page, long offsetInPage);
}
Loading

0 comments on commit c32bf0c

Please sign in to comment.