Skip to content

Commit

Permalink
Debug context: enforce usage of correct context instance
Browse files Browse the repository at this point in the history
Co-authored-by: Stepan Sindelar <stepan.sindelar@oracle.com>
  • Loading branch information
nirit100 and steve-s committed Nov 29, 2022
1 parent fa43733 commit 3550d56
Show file tree
Hide file tree
Showing 8 changed files with 1,931 additions and 193 deletions.
362 changes: 336 additions & 26 deletions hpy/debug/src/autogen_debug_ctx_call.i

Large diffs are not rendered by default.

1,306 changes: 1,166 additions & 140 deletions hpy/debug/src/autogen_debug_wrappers.c

Large diffs are not rendered by default.

118 changes: 112 additions & 6 deletions hpy/debug/src/debug_ctx.c
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,42 @@ static struct _HPyContext_s g_debug_ctx = {
.abi_version = HPY_ABI_VERSION,
};

/* Allocate a fresh HPyDebugCtxInfo and attach it to 'dctx'.
 *
 * The new info block is stored in dctx->_private with its magic number set
 * and 'is_valid' cleared; the caller is responsible for filling in the
 * 'info' field and flipping 'is_valid' once the context is fully usable.
 *
 * Returns the new HPyDebugCtxInfo, or NULL with a MemoryError raised on
 * 'uctx' if the allocation fails.
 */
static HPyDebugCtxInfo *init_ctx_info(HPyContext *dctx, HPyContext *uctx) {
    // idiomatic C allocation: no cast, size taken from the pointee
    HPyDebugCtxInfo *ctx_info = malloc(sizeof *ctx_info);
    if (ctx_info == NULL) {
        HPyErr_NoMemory(uctx);
        return NULL;
    }
    dctx->_private = ctx_info;
    ctx_info->magic_number = HPY_DEBUG_CTX_INFO_MAGIC;
    // start out invalid: the context must not be handed to user code
    // before initialization is complete
    ctx_info->is_valid = false;
    return ctx_info;
}

/* Create a shallow copy of the debug context 'dctx' with its own
 * HPyDebugCtxInfo attached, sharing the same HPyDebugInfo.
 *
 * Returns the new context, or NULL (with a MemoryError set on the
 * universal context) if any allocation fails.
 */
static HPyContext *copy_debug_context(HPyContext *dctx) {
    HPyDebugInfo *info = get_info(dctx);
    HPyContext *new_dctx = malloc(sizeof(struct _HPyContext_s));
    if (new_dctx == NULL) {
        // previously the result was memcpy'd into unchecked: UB on OOM
        HPyErr_NoMemory(info->uctx);
        return NULL;
    }
    memcpy(new_dctx, dctx, sizeof(struct _HPyContext_s));
    HPyDebugCtxInfo *ctx_info = init_ctx_info(new_dctx, info->uctx);
    if (ctx_info == NULL) {
        free(new_dctx);  // don't leak the half-initialized copy
        return NULL;
    }
    ctx_info->info = info;
    return new_dctx;
}

/* Prefill the debug-context cache of 'info' with copies of 'dctx'.
 *
 * Returns 0 on success. On allocation failure, frees any cache entries
 * created so far and returns -1 (a MemoryError has been raised on the
 * universal context by copy_debug_context).
 */
static int init_dctx_cache(HPyContext *dctx, HPyDebugInfo *info) {
    // We prefill the context cache to keep it simple
    for (size_t i = 0; i < HPY_DEBUG_CTX_CACHE_SIZE; ++i) {
        info->dctx_cache[i] = copy_debug_context(dctx);
        if (info->dctx_cache[i] == NULL) {
            // roll back the slots filled so far so the caller does not
            // leak partially built cache entries
            for (size_t j = 0; j < i; ++j) {
                free(info->dctx_cache[j]->_private);
                free(info->dctx_cache[j]);
                info->dctx_cache[j] = NULL;
            }
            return -1;
        }
    }
    info->dctx_cache_current_index = 0;
    return 0;
}

// NOTE: at the moment this function assumes that uctx is always the
// same. If/when we migrate to a system in which we can have multiple
// independent contexts, this function should ensure to create a different
Expand All @@ -25,13 +61,19 @@ int hpy_debug_ctx_init(HPyContext *dctx, HPyContext *uctx)
return 0;
}
// initialize debug_info
// XXX: currently we never free this malloc
HPyDebugInfo *info = malloc(sizeof(HPyDebugInfo));
// XXX: currently we never free this malloc and
// the allocations of the cached debug contexts
HPyDebugCtxInfo *ctx_info = init_ctx_info(dctx, uctx);
if (ctx_info == NULL) {
return -1;
}
ctx_info->is_valid = true;
HPyDebugInfo *info = ctx_info->info = malloc(sizeof(HPyDebugInfo));
if (info == NULL) {
HPyErr_NoMemory(uctx);
return -1;
}
info->magic_number = HPY_DEBUG_MAGIC;
info->magic_number = HPY_DEBUG_INFO_MAGIC;
info->uctx = uctx;
info->current_generation = 0;
info->uh_on_invalid_handle = HPy_NULL;
Expand All @@ -41,8 +83,10 @@ int hpy_debug_ctx_init(HPyContext *dctx, HPyContext *uctx)
info->protected_raw_data_size = 0;
DHQueue_init(&info->open_handles);
DHQueue_init(&info->closed_handles);
dctx->_private = info;
debug_ctx_init_fields(dctx, uctx);
if (init_dctx_cache(dctx, info) != 0) {
return -1;
}
return 0;
}

Expand Down Expand Up @@ -112,13 +156,38 @@ static void hpy_magic_dump(HPy h)
}
}

/* Hand out the next debug context from the prefilled cache, advancing the
 * round-robin cursor (wrapping at HPY_DEBUG_CTX_CACHE_SIZE). */
HPyContext* hpy_debug_get_next_dctx_from_cache(HPyContext *dctx) {
    HPyDebugInfo *info = get_info(dctx);
    size_t idx = info->dctx_cache_current_index;
    HPyContext *cached = info->dctx_cache[idx];
    // move the cursor forward, wrapping around to the first slot
    info->dctx_cache_current_index = (idx + 1) % HPY_DEBUG_CTX_CACHE_SIZE;
    return cached;
}

/* Report that an HPy API function was invoked on the wrong (stale) debug
 * context instance, print a stack trace if one can be produced, and abort.
 *
 * This function does not return.
 */
void report_invalid_debug_context(void) {
    // NOTE(review): assumes create_stacktrace sets *stacktrace to NULL on
    // failure — initialize defensively in case it leaves it untouched.
    char *stacktrace = NULL;
    fputs("Error: Wrong HPy Context!\n", stderr);
    create_stacktrace(&stacktrace, HPY_DEBUG_DEFAULT_STACKTRACE_LIMIT);
    if (stacktrace != NULL) {
        fputs(stacktrace, stderr);
    }
    fflush(stderr);
    abort();  // no cleanup: process state is considered unrecoverable
}

/* ~~~~~~~~~~ manually written wrappers ~~~~~~~~~~ */

/* Debug-mode implementation of HPy_Close: retire the debug handle in the
 * debug bookkeeping, then close the underlying universal handle. */
void debug_ctx_Close(HPyContext *dctx, DHPy dh)
{
    if (!get_ctx_info(dctx)->is_valid) {
        report_invalid_debug_context();
    }
    UHPy uh = DHPy_unwrap(dctx, dh);
    DHPy_close(dctx, dh);
    // Note: this may run __del__
    // Closing the universal handle may call back into user code; while the
    // upcall runs, this context is flagged invalid so that any extension
    // code reusing it (instead of the fresh context it receives) is caught
    // by the is_valid guard above.
    get_ctx_info(dctx)->is_valid = false;
    HPy_Close(get_info(dctx)->uctx, uh);
    get_ctx_info(dctx)->is_valid = true;
}

static void *
Expand All @@ -143,6 +212,9 @@ protect_and_associate_data_ptr(DHPy h, void *ptr, HPy_ssize_t data_size)

const char *debug_ctx_Unicode_AsUTF8AndSize(HPyContext *dctx, DHPy h, HPy_ssize_t *size)
{
if (!get_ctx_info(dctx)->is_valid) {
report_invalid_debug_context();
}
const char *ptr = HPyUnicode_AsUTF8AndSize(get_info(dctx)->uctx, DHPy_unwrap(dctx, h), size);
HPy_ssize_t data_size = 0;
if (ptr != NULL) {
Expand All @@ -153,6 +225,9 @@ const char *debug_ctx_Unicode_AsUTF8AndSize(HPyContext *dctx, DHPy h, HPy_ssize_

const char *debug_ctx_Bytes_AsString(HPyContext *dctx, DHPy h)
{
if (!get_ctx_info(dctx)->is_valid) {
report_invalid_debug_context();
}
HPyContext *uctx = get_info(dctx)->uctx;
UHPy uh = DHPy_unwrap(dctx, h);
const char *ptr = HPyBytes_AsString(uctx, uh);
Expand All @@ -166,6 +241,9 @@ const char *debug_ctx_Bytes_AsString(HPyContext *dctx, DHPy h)

const char *debug_ctx_Bytes_AS_STRING(HPyContext *dctx, DHPy h)
{
if (!get_ctx_info(dctx)->is_valid) {
report_invalid_debug_context();
}
HPyContext *uctx = get_info(dctx)->uctx;
UHPy uh = DHPy_unwrap(dctx, h);
const char *ptr = HPyBytes_AS_STRING(uctx, uh);
Expand All @@ -179,6 +257,9 @@ const char *debug_ctx_Bytes_AS_STRING(HPyContext *dctx, DHPy h)

DHPy debug_ctx_Tuple_FromArray(HPyContext *dctx, DHPy dh_items[], HPy_ssize_t n)
{
if (!get_ctx_info(dctx)->is_valid) {
report_invalid_debug_context();
}
UHPy *uh_items = (UHPy *)alloca(n * sizeof(UHPy));
for(int i=0; i<n; i++) {
uh_items[i] = DHPy_unwrap(dctx, dh_items[i]);
Expand All @@ -189,18 +270,28 @@ DHPy debug_ctx_Tuple_FromArray(HPyContext *dctx, DHPy dh_items[], HPy_ssize_t n)
/* Debug-mode implementation of HPyType_GenericNew: unwrap all debug handles,
 * delegate to the universal context, and wrap the result.
 *
 * The context is flagged invalid around the upcall because instantiation may
 * re-enter user code (e.g. tp_new), which must use its own fresh context.
 */
DHPy debug_ctx_Type_GenericNew(HPyContext *dctx, DHPy dh_type, DHPy *dh_args,
                               HPy_ssize_t nargs, DHPy dh_kw)
{
    if (!get_ctx_info(dctx)->is_valid) {
        report_invalid_debug_context();
    }
    UHPy uh_type = DHPy_unwrap(dctx, dh_type);
    UHPy uh_kw = DHPy_unwrap(dctx, dh_kw);
    UHPy *uh_args = (UHPy *)alloca(nargs * sizeof(UHPy));
    // index with HPy_ssize_t: 'nargs' need not fit in an int
    for (HPy_ssize_t i = 0; i < nargs; i++) {
        uh_args[i] = DHPy_unwrap(dctx, dh_args[i]);
    }
    get_ctx_info(dctx)->is_valid = false;
    UHPy uh_result = HPyType_GenericNew(get_info(dctx)->uctx, uh_type, uh_args,
                                        nargs, uh_kw);
    DHPy dh_result = DHPy_open(dctx, uh_result);
    get_ctx_info(dctx)->is_valid = true;
    return dh_result;
}

DHPy debug_ctx_Type_FromSpec(HPyContext *dctx, HPyType_Spec *spec, HPyType_SpecParam *dparams)
{
if (!get_ctx_info(dctx)->is_valid) {
report_invalid_debug_context();
}
// dparams might contain some hidden DHPy: we need to manually unwrap them.
if (dparams != NULL) {
// count the params
Expand Down Expand Up @@ -244,6 +335,9 @@ static const char *get_builtin_shape_name(HPyType_BuiltinShape shape)
#define MAKE_debug_ctx_AsStruct(SHAPE) \
void *debug_ctx_AsStruct_##SHAPE(HPyContext *dctx, DHPy dh) \
{ \
if (!get_ctx_info(dctx)->is_valid) { \
report_invalid_debug_context(); \
} \
HPyContext *uctx = get_info(dctx)->uctx; \
UHPy uh = DHPy_unwrap(dctx, dh); \
UHPy uh_type = HPy_Type(uctx, uh); \
Expand Down Expand Up @@ -314,21 +408,33 @@ MAKE_debug_ctx_AsStruct(List)

/* Debug-mode wrapper for HPyTracker_New: verify the context instance is the
 * currently valid one, then delegate to the generic implementation. */
HPyTracker debug_ctx_Tracker_New(HPyContext *dctx, HPy_ssize_t size)
{
    HPyDebugCtxInfo *ctx_info = get_ctx_info(dctx);
    if (!ctx_info->is_valid) {
        report_invalid_debug_context();
    }
    return ctx_Tracker_New(dctx, size);
}

/* Debug-mode wrapper for HPyTracker_Add: verify the context instance is the
 * currently valid one, then delegate to the generic implementation. */
int debug_ctx_Tracker_Add(HPyContext *dctx, HPyTracker ht, DHPy dh)
{
    HPyDebugCtxInfo *ctx_info = get_ctx_info(dctx);
    if (!ctx_info->is_valid) {
        report_invalid_debug_context();
    }
    return ctx_Tracker_Add(dctx, ht, dh);
}

/* Debug-mode wrapper for HPyTracker_ForgetAll: verify the context instance
 * is the currently valid one, then delegate to the generic implementation. */
void debug_ctx_Tracker_ForgetAll(HPyContext *dctx, HPyTracker ht)
{
    HPyDebugCtxInfo *ctx_info = get_ctx_info(dctx);
    if (!ctx_info->is_valid) {
        report_invalid_debug_context();
    }
    ctx_Tracker_ForgetAll(dctx, ht);
}

void debug_ctx_Tracker_Close(HPyContext *dctx, HPyTracker ht)
{
if (!get_ctx_info(dctx)->is_valid) {
report_invalid_debug_context();
}
// note: ctx_Tracker_Close internally calls HPy_Close() to close each
// handle: since we are calling it with the dctx, it will end up calling
// debug_ctx_Close, which is exactly what we need to properly record that
Expand Down
Loading

0 comments on commit 3550d56

Please sign in to comment.