{shortened_testname}"
- f"
\n{failure['failure_string']}\n
\n \n"
- )
- back_button = f"[back to {branch_name} summary]({f'analysis_{branch_name}'})\n\n"
- with open(
- os.path.join(subfolder, f"analysis_{branch_name}_{repo_name}.md"), "w"
- ) as wf:
- wf.write(
- back_button
- + method_repo_pytests[f"{branch_name}_{repo_name}"]
- + patch_diff
+
+def render_mds(overwrite_previous, subfolder="docs"):
+ leaderboard = {}
+
+ split_to_total_tests = {
+ "lite": 3628,
+ "all": 140926,
+ } # hard-coded to skip running it later
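+ # Build each split's leaderboard page header with its repo count and (hard-coded) total test count.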
+ for split in tqdm.tqdm(["lite", "all"]):
+ num_repos = len(SPLIT[split])
+ # total_num_tests = 0
+ # for repo_name in SPLIT[split]:
+ # repo_tests = subprocess.run(['commit0', 'get-tests', repo_name], capture_output=True, text=True).stdout.strip()
+ # total_num_tests += len(repo_tests.splitlines())
+ leaderboard[split] = leaderboard_header.format(
+ split=split,
+ num_repos=num_repos,
+ total_num_tests=split_to_total_tests[split],
+ )
+
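+ # Walk every org folder under analysis_files_path; each JSON file inside describes one submitted branch.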
+ for org_path in tqdm.tqdm(glob.glob(os.path.join(analysis_files_path, "*"))):
+ org_name = os.path.basename(org_path)
+ if org_name in {"blank", "repos", "submission_repos"}:
+ continue
+ for branch_path in glob.glob(os.path.join(org_path, "*.json")):
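+ # Reset per-branch counters before reading this branch's metrics.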
+ cum_tests_passed = 0
+ repos_resolved = 0
+ total_duration = 0.0
+ branch_metrics = json.load(open(branch_path))
+ submission_info = branch_metrics["submission_info"]
+ split = submission_info["split"]
+ org_name = submission_info["org_name"]
+ project_page_link = submission_info["project_page"]
+ display_name = submission_info["display_name"]
+ submission_date = submission_info["submission_date"]
+ branch_name = submission_info["branch"]
+ org_branch_filepath = os.path.join(
+ subfolder, f"analysis_{org_name}_{branch_name}.md"
+ )
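+ # Only re-render a submission whose page already exists if overwriting was requested.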
+ write_submission = True
+ if os.path.exists(org_branch_filepath) and not overwrite_previous:
+ write_submission = False
+
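+ # Start the submission's summary page from the table header template.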
+ if write_submission:
+ submission_page = submission_table_header.format(
+ display_name=display_name, split=split
+ )
- # Render general page. Has buttons to all methods
- leaderboard = """
-| | Name | Summary | |
-|--|--------|----------|--|"""
- # Render method page. Per method, buttons to all repos.
- method_to_repos = {}
- # Render method & repo page. Has "back" button.
- for branch_name, branch_info in all_submissions.items():
- cum_pytests = {"passed": 0}
- method_to_repos[branch_name] = """
-| | Repository | Summary | |
-|-|------------|---------|-|"""
- total_duration = 0.0
- for repo_name, repo_test_info in branch_info.items():
- for testname, test_info in repo_test_info.items():
- if "failed_to_run" in test_info:
- summary_pytests_string = "failure"
- else:
- total_duration += test_info["duration"]
- summary_pytests_string = (
- f"`{testname}`: {test_info['summary']['passed']} / "
- f"{test_info['summary']['collected']} ; duration: {test_info['duration']:.2f}s"
+ for repo_name, repo_pytest_results in branch_metrics.items():
+ if repo_name == "submission_info":
+ continue
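+ # Start this repo's analysis page and compute the markdown path it will be written to.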
+ if write_submission:
+ submission_repo_page = f"# **{display_name}**: {repo_name}"
+ org_branch_repo_filepath = os.path.join(
+ subfolder, f"analysis_{org_name}_{branch_name}_{repo_name}.md"
+ )
- for category, count in test_info["summary"].items():
- if category not in cum_pytests:
- cum_pytests[category] = 0
- if isinstance(count, int):
- cum_pytests[category] += int(count)
- elif isinstance(count, float):
- cum_pytests[category] += float(count)
- method_to_repos[branch_name] += (
- f"\n||[{repo_name}]({f'analysis_{branch_name}_{repo_name}'})|"
- f"{summary_pytests_string}||"
- )
- break # assume we ran all tests. will add functionality for checking diff tests later, as we need it.
- summary_pytests_string = (
- f"{cum_pytests['passed']} / {cum_pytests['collected']} ; duration: {total_duration:.2f}s"
- )
- leaderboard += f"\n||[{branch_name}]({f'analysis_{branch_name}'})|{summary_pytests_string}||"
- back_button = f"[back to all submissions]({f'analysis'})\n\n"
- with open(os.path.join(subfolder, f"analysis_{branch_name}.md"), "w") as wf:
- wf.write(back_button + "\n" + method_to_repos[branch_name])
- with open(os.path.join(subfolder, "analysis.md"), "w") as wf:
- wf.write(leaderboard)
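+ # A plain string result means the repo failed to clone: write a stub analysis page and a "Failed to clone" table row, then move on.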
+ if isinstance(repo_pytest_results, str):
+ submission_repo_page = f"# **{display_name}**: {repo_name}\n\n## Failed to clone\n\n{repo_pytest_results}"
+ org_branch_repo_filepath = os.path.join(
+ subfolder, f"analysis_{org_name}_{branch_name}_{repo_name}.md"
+ )
+ github_hyperlink = (
+ f"{project_page_link}/{repo_name}/tree/{branch_name}"
+ )
+ if branch_name == "reference":
+ github_hyperlink = f"{project_page_link}/{repo_name}"
+ submission_page = submission_table_header.format(
+ display_name=display_name, split=split
+ ) + (
+ f"\n| {repo_name} | No; Failed to clone. | - | - | "
+ f"[Analysis](/{f'analysis_{org_name}_{branch_name}_{repo_name}'}) | "
+ f"[Github]({github_hyperlink}) |"
+ )
+ back_button = f"[back to {display_name} summary](/{f'analysis_{org_name}_{branch_name}'})\n\n"
+ with open(org_branch_repo_filepath, "w") as wf:
+ wf.write(back_button + submission_repo_page)
+ continue
+
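+ # Render each pytest group that was run for this repo.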
+ for pytest_group, pytest_info in repo_pytest_results.items():
+ pytest_group = os.path.basename(pytest_group.strip("/"))
+ patch_diff = f"""\n\n## Patch diff\n```diff\n{pytest_info['patch_diff']}```"""
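+ # If pytest could not run at all, record the captured error output instead of a summary table.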
+ if "failed_to_run" in pytest_info:
+ resolved = False
+ if write_submission:
+ submission_repo_page += (
+ f"\n## Failed to run pytests for test `{pytest_group}`\n"
+ f"```\n{pytest_info['failed_to_run']}\n```"
+ )
+ pytest_details = "Pytest failed"
+ duration = "Failed."
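+ # Otherwise the group counts as resolved only when passed (plus skipped, when present) equals the total test count.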
+ else:
+ resolved = False
+ if "passed" in pytest_info["summary"]:
+ if "skipped" in pytest_info["summary"]:
+ resolved = pytest_info["summary"]["passed"] + pytest_info["summary"]["skipped"] == pytest_info["summary"]["total"]
+ else:
+ resolved = pytest_info["summary"]["passed"] == pytest_info["summary"]["total"]
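+ # Emit the pytest summary table, formatting the duration entry in seconds.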
+ if write_submission:
+ submission_repo_page += pytest_summary_table_header.format(
+ pytest_group=pytest_group
+ )
+ for category, count in pytest_info["summary"].items():
+ if category not in {"duration"}:
+ submission_repo_page += (
+ f"""| {category} | {count} |\n"""
+ )
+ else:
+ submission_repo_page += (
+ f"""| {category} | {float(count):.2f}s |\n"""
+ )
+
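+ # List every failing test with its captured failure output.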
+ submission_repo_page += "\n## Failed pytests:\n\n"
+ for testname, failure in pytest_info["failures"].items():
+ shortened_testname = os.path.basename(testname)
+ submission_repo_page += (
+ f"### {shortened_testname}\n\n