Fix memtracer bad assumptions on the size of stack trace #1122

Merged · 5 commits · Jun 5, 2024

Changes from all commits
59 changes: 37 additions & 22 deletions source/memtrace.c
@@ -131,38 +131,53 @@ static void s_alloc_tracer_track(struct alloc_tracer *tracer, void *ptr, size_t
     aws_high_res_clock_get_ticks(&alloc->time);
 
     if (tracer->level == AWS_MEMTRACE_STACKS) {
-        /* capture stack frames, skip 2 for this function and the allocation vtable function */
+        /* capture stack frames,
+         * skip 2 for this function and the allocation vtable function if we have a full stack trace
+         * and otherwise just capture whatever stack trace we got
+         */
         AWS_VARIABLE_LENGTH_ARRAY(void *, stack_frames, (FRAMES_TO_SKIP + tracer->frames_per_stack));
         size_t stack_depth = aws_backtrace(stack_frames, FRAMES_TO_SKIP + tracer->frames_per_stack);
-        if (stack_depth >= FRAMES_TO_SKIP) {
-            /* hash the stack pointers */
-            struct aws_byte_cursor stack_cursor =
-                aws_byte_cursor_from_array(stack_frames, stack_depth * sizeof(void *));
-            uint64_t stack_id = aws_hash_byte_cursor_ptr(&stack_cursor);
-            alloc->stack = stack_id; /* associate the stack with the alloc */
-
-            aws_mutex_lock(&tracer->mutex);
-            struct aws_hash_element *item = NULL;
-            int was_created = 0;
-            AWS_FATAL_ASSERT(
-                AWS_OP_SUCCESS ==
-                aws_hash_table_create(&tracer->stacks, (void *)(uintptr_t)stack_id, &item, &was_created));
-            /* If this is a new stack, save it to the hash */
-            if (was_created) {
-                struct stack_trace *stack = aws_mem_calloc(
-                    aws_default_allocator(),
-                    1,
-                    sizeof(struct stack_trace) + (sizeof(void *) * tracer->frames_per_stack));
-                AWS_FATAL_ASSERT(stack);
+        AWS_FATAL_ASSERT(stack_depth > 0);
+
+        /* hash the stack pointers */
+        struct aws_byte_cursor stack_cursor = aws_byte_cursor_from_array(stack_frames, stack_depth * sizeof(void *));
+        uint64_t stack_id = aws_hash_byte_cursor_ptr(&stack_cursor);
+        alloc->stack = stack_id; /* associate the stack with the alloc */
+
+        aws_mutex_lock(&tracer->mutex);
+        struct aws_hash_element *item = NULL;
+        int was_created = 0;
+        AWS_FATAL_ASSERT(
+            AWS_OP_SUCCESS == aws_hash_table_create(&tracer->stacks, (void *)(uintptr_t)stack_id, &item, &was_created));
+        /* If this is a new stack, save it to the hash */
+        if (was_created) {
+            struct stack_trace *stack = aws_mem_calloc(
+                aws_default_allocator(), 1, sizeof(struct stack_trace) + (sizeof(void *) * tracer->frames_per_stack));
+            AWS_FATAL_ASSERT(stack);
+            /**
+             * Optimizations can affect the number of frames we get, and in pathological cases we can
+             * get fewer than FRAMES_TO_SKIP frames, but always at least 1 because code has to start somewhere
+             * (looking at you, gcc with -O3 on aarch64).
+             * With optimizations on we cannot trust the stack trace too much.
+             * Memtracer assumes that a stack trace will be available in all cases if the stack trace API
+             * works at all. So in the pathological case of stack_depth <= FRAMES_TO_SKIP, record all the frames
+             * we have, to at least have an anchor for where the allocation is coming from, however inaccurate it is.
+             */
+            if (stack_depth <= FRAMES_TO_SKIP) {
+                memcpy((void **)&stack->frames[0], &stack_frames[0], (stack_depth) * sizeof(void *));
+                stack->depth = stack_depth;
+                item->value = stack;
+            } else {
                 memcpy(
                     (void **)&stack->frames[0],
                     &stack_frames[FRAMES_TO_SKIP],
                     (stack_depth - FRAMES_TO_SKIP) * sizeof(void *));
                 stack->depth = stack_depth - FRAMES_TO_SKIP;
                 item->value = stack;
             }
-            aws_mutex_unlock(&tracer->mutex);
         }
+
+        aws_mutex_unlock(&tracer->mutex);
     }
 
     aws_mutex_lock(&tracer->mutex);
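
The core of the change is the new if/else: the wrapper frames are skipped only when the captured trace is deep enough, and otherwise everything that was captured is kept so the allocation still has some anchor. The sketch below is a standalone illustration of that policy, not aws-c-common code; it calls the glibc/libexecinfo backtrace() API directly, and MAX_FRAMES plus the capture_callsite() helper are invented for this example (FRAMES_TO_SKIP mirrors the constant used in memtrace.c).

/* Standalone sketch of the fallback policy; FRAMES_TO_SKIP mirrors memtrace.c,
 * MAX_FRAMES and capture_callsite() are invented for this example. */
#include <execinfo.h>
#include <stdio.h>
#include <string.h>

#define FRAMES_TO_SKIP 2
#define MAX_FRAMES 8

static size_t capture_callsite(void **out, size_t out_len) {
    void *raw[FRAMES_TO_SKIP + MAX_FRAMES];
    int depth = backtrace(raw, FRAMES_TO_SKIP + MAX_FRAMES);
    if (depth <= 0) {
        return 0; /* no backtrace available at all */
    }
    size_t copied;
    if ((size_t)depth <= FRAMES_TO_SKIP) {
        /* pathological case (heavy inlining/optimization): keep whatever we got */
        copied = (size_t)depth;
        memcpy(out, raw, copied * sizeof(void *));
    } else {
        /* normal case: drop the wrapper frames, keep the caller's frames */
        copied = (size_t)depth - FRAMES_TO_SKIP;
        if (copied > out_len) {
            copied = out_len;
        }
        memcpy(out, raw + FRAMES_TO_SKIP, copied * sizeof(void *));
    }
    return copied;
}

int main(void) {
    void *frames[MAX_FRAMES];
    size_t n = capture_callsite(frames, MAX_FRAMES);
    backtrace_symbols_fd(frames, (int)n, 2 /* stderr */);
    printf("captured %zu frame(s)\n", n);
    return 0;
}

On glibc the backtrace functions are part of libc; on some other platforms (for example the BSDs or musl-based systems) the example would need to link against libexecinfo.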
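
For the deduplication step around this change, the tracer hashes the raw frame-pointer bytes (aws_byte_cursor_from_array plus aws_hash_byte_cursor_ptr) and uses the result as the key into tracer->stacks, so each distinct call stack is stored only once no matter how many allocations share it. Below is a rough, dependency-free sketch of that idea, with a 64-bit FNV-1a hash standing in for the library's hash function; stack_id_from_frames is a hypothetical helper, not part of the aws-c-common API.

/* Sketch only: FNV-1a stands in for aws_hash_byte_cursor_ptr; a real tracer
 * stores the frames in a hash table keyed by this id, as memtrace.c does
 * with aws_hash_table_create(). */
#include <stdint.h>
#include <stddef.h>

static uint64_t stack_id_from_frames(void *const *frames, size_t depth) {
    const unsigned char *bytes = (const unsigned char *)frames;
    size_t len = depth * sizeof(void *);
    uint64_t hash = 14695981039346656037ULL; /* FNV-1a 64-bit offset basis */
    for (size_t i = 0; i < len; ++i) {
        hash ^= bytes[i];
        hash *= 1099511628211ULL; /* FNV-1a 64-bit prime */
    }
    /* identical stacks map to the same id, so each unique stack is
     * recorded (and later symbolized) only once */
    return hash;
}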