Skip to content

Commit

Permalink
Merge branch 'master' into mri-redundancies
Browse files Browse the repository at this point in the history
  • Loading branch information
tsalo authored Sep 1, 2022
2 parents d71e7f4 + 002d0b4 commit 7049f29
Show file tree
Hide file tree
Showing 2 changed files with 112 additions and 39 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -1227,8 +1227,8 @@ EPI scans with different phase encoding directions to estimate the distortion
map corresponding to the nonuniformities of the *B<sub>0</sub>* field.
These `*_epi.nii[.gz]` - or `*_m0scan.nii[.gz]` for arterial spin labeling perfusion data - files can be 3D or 4D --
in the latter case, all timepoints share the same scanning parameters.
Examples of software tools using these kinds of images are FSL TOPUP,
AFNI `3dqwarp`, and SPM.
Examples of software tools using these kinds of images are FSL TOPUP and
AFNI `3dqwarp`.

<!--
This block generates filename templates.
Expand Down
147 changes: 110 additions & 37 deletions tools/schemacode/bidsschematools/render.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,9 @@
utils.set_logger_level(lgr, os.environ.get("BIDS_SCHEMA_LOG_LEVEL", logging.INFO))
logging.basicConfig(format="%(asctime)-15s [%(levelname)8s] %(message)s")

# Remember to add extension (.html or .md) to the paths when using them.
ENTITIES_PATH = "SPEC_ROOT/99-appendices/09-entities"
GLOSSARY_PATH = "SPEC_ROOT/99-appendices/14-glossary"
TYPE_CONVERTER = {
"associated_data": "associated data",
"columns": "column",
Expand Down Expand Up @@ -289,8 +292,6 @@ def make_filename_template(

schema = Namespace(filter_schema(schema.to_dict(), **kwargs))
entity_order = schema["rules"]["entities"]
entities_path = "SPEC_ROOT/99-appendices/09-entities.html"
glossary_path = "SPEC_ROOT/99-appendices/14-glossary.html"

paragraph = ""
# Parent directories
Expand All @@ -300,7 +301,7 @@ def make_filename_template(
)
paragraph += utils._link_with_html(
sub_string,
html_path=entities_path,
html_path=ENTITIES_PATH + ".html",
heading="sub",
pdf_format=pdf_format,
)
Expand All @@ -311,7 +312,7 @@ def make_filename_template(
)
paragraph += utils._link_with_html(
ses_string,
html_path=entities_path,
html_path=ENTITIES_PATH + ".html",
heading="ses",
pdf_format=pdf_format,
)
Expand All @@ -328,7 +329,7 @@ def make_filename_template(
paragraph += "\t\t"
paragraph += utils._link_with_html(
datatype,
html_path=glossary_path,
html_path=GLOSSARY_PATH + ".html",
heading=f"{datatype.lower()}-datatypes",
pdf_format=pdf_format,
)
Expand All @@ -346,23 +347,23 @@ def make_filename_template(
)
ent_format = utils._link_with_html(
ent_format,
html_path=entities_path,
html_path=ENTITIES_PATH + ".html",
heading=schema["objects"]["entities"][ent]["name"],
pdf_format=pdf_format,
)
else:
# Standard entity key-value pattern with simple label/index
ent_format = utils._link_with_html(
schema["objects"]["entities"][ent]["name"],
html_path=entities_path,
html_path=ENTITIES_PATH + ".html",
heading=schema["objects"]["entities"][ent]["name"],
pdf_format=pdf_format,
)
ent_format += "-"
ent_format += "<" if pdf_format else "&lt;"
ent_format += utils._link_with_html(
schema["objects"]["entities"][ent].get("format", "label"),
html_path=glossary_path,
html_path=GLOSSARY_PATH + ".html",
heading=(
f'{schema["objects"]["entities"][ent].get("format", "label")}-formats'
),
Expand Down Expand Up @@ -394,7 +395,7 @@ def make_filename_template(
string += "<" if pdf_format else "&lt;"
string += utils._link_with_html(
"suffix",
html_path=glossary_path,
html_path=GLOSSARY_PATH + ".html",
heading="suffix-common_principles",
pdf_format=pdf_format,
)
Expand All @@ -412,7 +413,7 @@ def make_filename_template(

suffix_string = utils._link_with_html(
suffix,
html_path=glossary_path,
html_path=GLOSSARY_PATH + ".html",
heading=f"{suffix_id.lower()}-suffixes",
pdf_format=pdf_format,
)
Expand Down Expand Up @@ -447,7 +448,7 @@ def make_filename_template(

extensions = utils.combine_extensions(
extensions,
html_path=glossary_path,
html_path=GLOSSARY_PATH + ".html",
heading_lst=ext_headings,
pdf_format=pdf_format,
)
Expand Down Expand Up @@ -701,9 +702,41 @@ def preproc(desc):
return table_str


def make_obj_table(subschema, field_info, src_path=None, tablefmt="github"):
# Use the "name" field in the table, to allow for filenames to not match
# "names".
def make_obj_table(
subschema,
field_info,
field_type,
src_path=None,
tablefmt="github",
n_values_to_combine=15,
):
"""Make a generic table describing objects in the schema.
This does shared work between describing metadata fields and subobjects in the schema.
Parameters
----------
subschema : Namespace or dict
Subset of the overall schema, including only the target object definitions.
field_info : dict
Additional information about each row in the table to be added to schema information.
Keys should match "name" entries in ``subschema``.
Values should be either a string (in which case, it's requirement information) or
a tuple (in which case, the first entry is requirement info and the second is
information to be added to the object's description).
field_type : str
The name of the field type. For example, "metadata".
src_path : str | None
The file where this macro is called, which may be explicitly provided
by the "page.file.src_path" variable.
tablefmt : string, optional
The target table format. The default is "github" (GitHub format).
n_values_to_combine : int, optional
When there are many valid values for a given object,
instead of listing each one in the table,
link to the associated glossary entry.
"""
# Use the "name" field in the table, to allow for filenames to not match "names".
df = pd.DataFrame(
index=[subschema[f]["name"] for f in field_info],
columns=["**Requirement Level**", "**Data type**", "**Description**"],
Expand All @@ -728,8 +761,19 @@ def make_obj_table(subschema, field_info, src_path=None, tablefmt="github"):
subschema[field]["description"] + " " + description_addendum
)

# Try to add info about valid values
valid_values_str = utils.describe_valid_values(subschema[field])
if (
"enum" in subschema[field].keys()
and len(subschema[field]["enum"]) >= n_values_to_combine
):
glossary_entry = f"{GLOSSARY_PATH}.md#objects.{field}"
valid_values_str = (
"For a list of valid values for this field, see the "
f"[associated glossary entry]({glossary_entry})."
)
else:
# Try to add info about valid values
valid_values_str = utils.describe_valid_values(subschema[field])

if valid_values_str:
description += "\n\n\n\n" + valid_values_str

Expand Down Expand Up @@ -772,8 +816,10 @@ def make_sidecar_table(
table_str : str
The tabulated table as a Markdown string.
"""
field_type = "metadata"
if isinstance(table_name, str):
table_name = [table_name]

fields = {}
for table in table_name:
new_fields = schema.rules.sidecars[table].fields
Expand All @@ -783,14 +829,14 @@ def make_sidecar_table(
f"Schema tables {table_name} share overlapping fields: {overlap}"
)
fields.update(new_fields)
metadata = schema.objects.metadata

retained_fields = [f for f in fields if f in metadata]
dropped_fields = [f for f in fields if f not in metadata]
subschema = schema.objects[field_type]
retained_fields = [f for f in fields if f in subschema]
dropped_fields = [f for f in fields if f not in subschema]
if dropped_fields:
print("Warning: Missing fields: {}".format(", ".join(dropped_fields)))

metadata_schema = {k: v for k, v in metadata.items() if k in retained_fields}
subschema = {k: v for k, v in subschema.items() if k in retained_fields}
field_info = {}
for field, val in fields.items():
if isinstance(val, str):
Expand All @@ -811,8 +857,9 @@ def make_sidecar_table(
field_info[field] = (level, description_addendum) if description_addendum else level

table_str = make_obj_table(
metadata_schema,
subschema,
field_info=field_info,
field_type=field_type,
src_path=src_path,
tablefmt=tablefmt,
)
Expand Down Expand Up @@ -849,18 +896,20 @@ def make_metadata_table(schema, field_info, src_path=None, tablefmt="github"):
"""
fields = list(field_info.keys())
# The filter function doesn't work here.
metadata_schema = schema["objects"]["metadata"]
field_type = "metadata"
subschema = schema["objects"][field_type]

retained_fields = [f for f in fields if f in metadata_schema.keys()]
dropped_fields = [f for f in fields if f not in metadata_schema.keys()]
retained_fields = [f for f in fields if f in subschema.keys()]
dropped_fields = [f for f in fields if f not in subschema.keys()]
if dropped_fields:
print("Warning: Missing fields: {}".format(", ".join(dropped_fields)))

metadata_schema = {k: v for k, v in metadata_schema.items() if k in retained_fields}
subschema = {k: v for k, v in subschema.items() if k in retained_fields}

table_str = make_obj_table(
metadata_schema,
subschema,
field_info=field_info,
field_type=field_type,
src_path=src_path,
tablefmt=tablefmt,
)
Expand Down Expand Up @@ -908,14 +957,22 @@ def make_subobject_table(schema, object_tuple, field_info, src_path=None, tablef
table_str = make_obj_table(
temp_dict,
field_info=field_info,
field_type="subobject",
src_path=src_path,
tablefmt=tablefmt,
n_values_to_combine=10000, # no combination
)

return table_str


def make_columns_table(schema, column_info, src_path=None, tablefmt="github"):
def make_columns_table(
schema,
column_info,
src_path=None,
tablefmt="github",
n_values_to_combine=15,
):
"""Produce columns table (markdown) based on requested fields.
Parameters
Expand All @@ -936,6 +993,10 @@ def make_columns_table(schema, column_info, src_path=None, tablefmt="github"):
by the "page.file.src_path" variable.
tablefmt : string, optional
The target table format. The default is "github" (GitHub format).
n_values_to_combine : int, optional
When there are many valid values for a given column,
instead of listing each one in the table,
link to the associated glossary entry.
Returns
-------
Expand All @@ -944,22 +1005,23 @@ def make_columns_table(schema, column_info, src_path=None, tablefmt="github"):
"""
fields = list(column_info.keys())
# The filter function doesn't work here.
column_schema = schema["objects"]["columns"]
field_type = "columns"
subschema = schema["objects"][field_type]

retained_fields = [f for f in fields if f in column_schema.keys()]
dropped_fields = [f for f in fields if f not in column_schema.keys()]
retained_fields = [f for f in fields if f in subschema.keys()]
dropped_fields = [f for f in fields if f not in subschema.keys()]
if dropped_fields:
print("Warning: Missing fields: {}".format(", ".join(dropped_fields)))

# Use the "name" field in the table, to allow for filenames to not match
# "names".
df = pd.DataFrame(
index=[column_schema[f]["name"] for f in retained_fields],
index=[subschema[f]["name"] for f in retained_fields],
columns=["**Requirement Level**", "**Data type**", "**Description**"],
)
df.index.name = "**Column name**"
for field in retained_fields:
field_name = column_schema[field]["name"]
field_name = subschema[field]["name"]
requirement_info = column_info[field]
description_addendum = ""
if isinstance(requirement_info, tuple):
Expand All @@ -970,17 +1032,28 @@ def make_columns_table(schema, column_info, src_path=None, tablefmt="github"):
"[DEPRECATED](/02-common-principles.html#definitions)",
)

type_string = utils.resolve_metadata_type(column_schema[field])
type_string = utils.resolve_metadata_type(subschema[field])

description = column_schema[field]["description"] + " " + description_addendum
description = subschema[field]["description"] + " " + description_addendum

description = description.replace("SPEC_ROOT", get_relpath(src_path))
if (
"enum" in subschema[field].keys()
and len(subschema[field]["enum"]) >= n_values_to_combine
):
glossary_entry = f"{GLOSSARY_PATH}.md#objects.{field_type}.{field}"
valid_values_str = (
"For a list of valid values for this field, see the "
f"[associated glossary entry]({glossary_entry})."
)
else:
# Try to add info about valid values
valid_values_str = utils.describe_valid_values(subschema[field])

# Try to add info about valid values
valid_values_str = utils.describe_valid_values(column_schema[field])
if valid_values_str:
description += "\n\n\n\n" + valid_values_str

description = description.replace("SPEC_ROOT", get_relpath(src_path))

df.loc[field_name] = [
normalize_breaks(requirement_info),
type_string,
Expand Down

0 comments on commit 7049f29

Please sign in to comment.