Creating pull request for 10.21105.joss.03391 #2808

Merged · 2 commits · Dec 10, 2021
182 changes: 182 additions & 0 deletions joss.03391/10.21105.joss.03391.crossref.xml
@@ -0,0 +1,182 @@
<?xml version="1.0" encoding="UTF-8"?>
<doi_batch xmlns="http://www.crossref.org/schema/4.4.0" xmlns:ai="http://www.crossref.org/AccessIndicators.xsd" xmlns:rel="http://www.crossref.org/relations.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" version="4.4.0" xsi:schemaLocation="http://www.crossref.org/schema/4.4.0 http://www.crossref.org/schemas/crossref4.4.0.xsd">
  <head>
    <doi_batch_id>b3e02009ae36ac5e12740a84e3cc66f8</doi_batch_id>
    <timestamp>20211210015143</timestamp>
    <depositor>
      <depositor_name>JOSS Admin</depositor_name>
      <email_address>admin@theoj.org</email_address>
    </depositor>
    <registrant>The Open Journal</registrant>
  </head>
  <body>
    <journal>
      <journal_metadata>
        <full_title>Journal of Open Source Software</full_title>
        <abbrev_title>JOSS</abbrev_title>
        <issn media_type="electronic">2475-9066</issn>
        <doi_data>
          <doi>10.21105/joss</doi>
          <resource>https://joss.theoj.org</resource>
        </doi_data>
      </journal_metadata>
      <journal_issue>
        <publication_date media_type="online">
          <month>12</month>
          <year>2021</year>
        </publication_date>
        <journal_volume>
          <volume>6</volume>
        </journal_volume>
        <issue>68</issue>
      </journal_issue>
      <journal_article publication_type="full_text">
        <titles>
          <title>Omnizart: A General Toolbox for Automatic Music Transcription</title>
        </titles>
        <contributors>
          <person_name sequence="first" contributor_role="author">
            <given_name>Yu-Te</given_name>
            <surname>Wu</surname>
          </person_name>
          <person_name sequence="additional" contributor_role="author">
            <given_name>Yin-Jyun</given_name>
            <surname>Luo</surname>
          </person_name>
          <person_name sequence="additional" contributor_role="author">
            <given_name>Tsung-Ping</given_name>
            <surname>Chen</surname>
          </person_name>
          <person_name sequence="additional" contributor_role="author">
            <given_name>I-Chieh</given_name>
            <surname>Wei</surname>
          </person_name>
          <person_name sequence="additional" contributor_role="author">
            <given_name>Jui-Yang</given_name>
            <surname>Hsu</surname>
          </person_name>
          <person_name sequence="additional" contributor_role="author">
            <given_name>Yi-Chin</given_name>
            <surname>Chuang</surname>
          </person_name>
          <person_name sequence="additional" contributor_role="author">
            <given_name>Li</given_name>
            <surname>Su</surname>
          </person_name>
        </contributors>
        <publication_date>
          <month>12</month>
          <day>10</day>
          <year>2021</year>
        </publication_date>
        <pages>
          <first_page>3391</first_page>
        </pages>
        <publisher_item>
          <identifier id_type="doi">10.21105/joss.03391</identifier>
        </publisher_item>
        <ai:program name="AccessIndicators">
          <ai:license_ref applies_to="vor">http://creativecommons.org/licenses/by/4.0/</ai:license_ref>
          <ai:license_ref applies_to="am">http://creativecommons.org/licenses/by/4.0/</ai:license_ref>
          <ai:license_ref applies_to="tdm">http://creativecommons.org/licenses/by/4.0/</ai:license_ref>
        </ai:program>
        <rel:program>
          <rel:related_item>
            <rel:description>Software archive</rel:description>
            <rel:inter_work_relation relationship-type="references" identifier-type="doi">https://doi.org/10.5281/zenodo.5769022</rel:inter_work_relation>
          </rel:related_item>
          <rel:related_item>
            <rel:description>GitHub review issue</rel:description>
            <rel:inter_work_relation relationship-type="hasReview" identifier-type="uri">https://github.com/openjournals/joss-reviews/issues/3391</rel:inter_work_relation>
          </rel:related_item>
        </rel:program>
        <doi_data>
          <doi>10.21105/joss.03391</doi>
          <resource>https://joss.theoj.org/papers/10.21105/joss.03391</resource>
          <collection property="text-mining">
            <item>
              <resource mime_type="application/pdf">https://joss.theoj.org/papers/10.21105/joss.03391.pdf</resource>
            </item>
          </collection>
        </doi_data>
        <citation_list>
          <citation key="ref1">
            <unstructured_citation>Tensorflow: A system for large-scale machine learning, Abadi, Martín and Barham, Paul and Chen, Jianmin and Chen, Zhifeng and Davis, Andy and Dean, Jeffrey and Devin, Matthieu and Ghemawat, Sanjay and Irving, Geoffrey and Isard, Michael and others, USENIX symposium on operating systems design and implementation (OSDI), 265–283, 2016</unstructured_citation>
          </citation>
          <citation key="ref2">
            <doi>10.1109/taslp.2020.3030482</doi>
          </citation>
          <citation key="ref3">
            <unstructured_citation>Hawthorne, Curtis and Stasyuk, Andriy and Roberts, Adam and Simon, Ian and Huang, Cheng-Zhi Anna and Dieleman, Sander and Elsen, Erich and Engel, Jesse H. and Eck, Douglas, Enabling Factorized Piano Music Modeling and Generation with the MAESTRO Dataset, International Conference on Learning Representations (ICLR), 2019</unstructured_citation>
          </citation>
          <citation key="ref4">
            <unstructured_citation>Parmar, Niki and Vaswani, Ashish and Uszkoreit, Jakob and Kaiser, Lukasz and Shazeer, Noam and Ku, Alexander and Tran, Dustin, Image Transformer, Proceedings of the 35th International Conference on Machine Learning (ICML), 4052–4061, 2018</unstructured_citation>
          </citation>
          <citation key="ref5">
            <doi>10.1007/978-3-030-01234-2_49</doi>
          </citation>
          <citation key="ref6">
            <doi>10.1109/taslp.2015.2442411</doi>
          </citation>
          <citation key="ref7">
            <unstructured_citation>On the Potential of Simple Framewise Approaches to Piano Transcription, Kelz, Rainer and Dorfer, Matthias and Korzeniowski, Filip and Böck, Sebastian and Arzt, Andreas and Widmer, Gerhard, Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), 475–481, 2016</unstructured_citation>
          </citation>
          <citation key="ref8">
            <doi>10.1109/icassp39728.2021.9414409</doi>
          </citation>
          <citation key="ref9">
            <doi>10.1109/icassp.2018.8462079</doi>
          </citation>
          <citation key="ref10">
            <doi>10.1109/icassp.2018.8462420</doi>
          </citation>
          <citation key="ref11">
            <doi>10.1109/access.2019.2960566</doi>
          </citation>
          <citation key="ref12">
            <doi>10.1109/TPAMI.2018.2858821</doi>
          </citation>
          <citation key="ref13">
            <unstructured_citation>Harmony Transformer: Incorporating chord segmentation into harmony recognition, Chen, Tsung-Ping and Su, Li, Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), 2019</unstructured_citation>
          </citation>
          <citation key="ref14">
            <unstructured_citation>Characterization and melodic similarity of a cappella flamenco cantes, Mora, Joaquín and Gómez, Francisco and Gómez, Emilia and Escobar-Borrego, Francisco and Díaz-Báñez, José Miguel, Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), 9–13, 2010</unstructured_citation>
          </citation>
          <citation key="ref15">
            <doi>10.1109/tasl.2009.2026503</doi>
          </citation>
          <citation key="ref16">
            <unstructured_citation>Evaluation framework for automatic singing transcription, Molina, E. and Barbancho-Perez, A. M. and Tardón, L. J. and Barbancho-Perez, I. and others, Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), 2014</unstructured_citation>
          </citation>
          <citation key="ref17">
            <unstructured_citation>ENST-Drums: an extensive audio-visual database for drum signals processing, Gillet, Olivier and Richard, Gaël, Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), 156–159, 2006</unstructured_citation>
          </citation>
          <citation key="ref18">
            <unstructured_citation>MDB Drums: An annotated subset of MedleyDB for automatic drum transcription, Southall, Carl and Wu, Chih-Wei and Lerch, Alexander and Hockman, Jason, 2017, Late Breaking/Demos of the 18th International Society for Music Information Retrieval Conference (ISMIR)</unstructured_citation>
          </citation>
          <citation key="ref19">
            <unstructured_citation>Beat and Downbeat Tracking of Symbolic Music Data Using Deep Recurrent Neural Networks, Chuang, Yi-Chin and Su, Li, Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC), 346–352, 2020, IEEE</unstructured_citation>
          </citation>
          <citation key="ref20">
            <doi>10.1109/icassp.2018.8461686</doi>
          </citation>
          <citation key="ref21">
            <doi>10.1145/2964284.2973795</doi>
          </citation>
          <citation key="ref22">
            <doi>10.25080/majora-7b98e3ed-003</doi>
          </citation>
          <citation key="ref23">
            <unstructured_citation>Burgoyne, John Ashley and Wild, Jonathan and Fujinaga, Ichiro, An Expert Ground Truth Set for Audio Chord Recognition and Music Analysis, Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), 633-638, 2011</unstructured_citation>
          </citation>
          <citation key="ref24">
            <unstructured_citation>Mauch, Matthias and Dixon, Simon, Approximate Note Transcription for the Improved Identification of Difficult Chords, Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), 135-140, 2010</unstructured_citation>
          </citation>
          <citation key="ref25">
            <unstructured_citation>Learning Features of Music from Scratch, Thickstun, John and Harchaoui, Zaid and Kakade, Sham M., 2017, International Conference on Learning Representations (ICLR)</unstructured_citation>
          </citation>
        </citation_list>
      </journal_article>
    </journal>
  </body>
</doi_batch>
Binary file added joss.03391/10.21105.joss.03391.pdf
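
Not part of the deposit itself, but a quick way to sanity-check it: the minimal sketch below parses the XML above with Python's standard library and pulls out the article DOI, title, author list, and the related-item links. The file path is taken from the diff header; the element paths mirror the deposit, and because Crossref deposits are namespaced, every lookup has to be namespace-qualified.

```python
# Minimal sketch: sanity-check the Crossref deposit in this PR.
# Assumes the file path from the diff header; element names come
# from the XML above (Crossref schema 4.4.0 plus the rel: namespace).
import xml.etree.ElementTree as ET

NS = {
    "cr": "http://www.crossref.org/schema/4.4.0",    # default namespace of the deposit
    "rel": "http://www.crossref.org/relations.xsd",  # related-item namespace
}

tree = ET.parse("joss.03391/10.21105.joss.03391.crossref.xml")
article = tree.getroot().find(".//cr:journal_article", NS)

# Article-level metadata.
title = article.findtext("cr:titles/cr:title", namespaces=NS)
doi = article.findtext("cr:doi_data/cr:doi", namespaces=NS)
authors = [
    "{} {}".format(
        p.findtext("cr:given_name", namespaces=NS),
        p.findtext("cr:surname", namespaces=NS),
    )
    for p in article.findall("cr:contributors/cr:person_name", NS)
]

# Related items: the Zenodo software archive and the JOSS review issue.
relations = {
    item.findtext("rel:description", namespaces=NS):
        item.findtext("rel:inter_work_relation", namespaces=NS)
    for item in article.findall("rel:program/rel:related_item", NS)
}

print(doi, "-", title)
print(", ".join(authors))
print(relations)
```

Run against the file in this PR, this should print `10.21105/joss.03391 - Omnizart: A General Toolbox for Automatic Music Transcription`, the seven authors, and the Zenodo archive plus review-issue links recorded under `rel:program`.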