diff --git a/.xperior/testds/motr-single_tests.yaml b/.xperior/testds/motr-single_tests.yaml index cda557d09c0..b8c5d57ad76 100644 --- a/.xperior/testds/motr-single_tests.yaml +++ b/.xperior/testds/motr-single_tests.yaml @@ -19,8 +19,1268 @@ --- Tests: - - id : 00userspace-tests - script : 'm0 run-ut' + - id : 00userspace-tests_libm0-ut + script : 'm0 run-ut -t libm0-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_addb2-base + script : 'm0 run-ut -t addb2-base' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_addb2-consumer + script : 'm0 run-ut -t addb2-consumer' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_addb2-histogram + script : 'm0 run-ut -t addb2-histogram' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_addb2-net + script : 'm0 run-ut -t addb2-net' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_addb2-storage + script : 'm0 run-ut -t addb2-storage' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_addb2-sys + script : 'm0 run-ut -t 
addb2-sys' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_di-ut + script : 'm0 run-ut -t di-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_balloc-ut + script : 'm0 run-ut -t balloc-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_be-ut + script : 'm0 run-ut -t be-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_buffer_pool_ut + script : 'm0 run-ut -t buffer_pool_ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_bulk-client-ut + script : 'm0 run-ut -t bulk-client-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_bulk-server-ut + script : 'm0 run-ut -t bulk-server-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_bytecount-ut + script : 'm0 run-ut -t bytecount-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + 
sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_dtm0-ut + script : 'm0 run-ut -t dtm0-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_btree-ut + script : 'm0 run-ut -t btree-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_capa-ut + script : 'm0 run-ut -t capa-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_cas-client + script : 'm0 run-ut -t cas-client' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_cas-service + script : 'm0 run-ut -t cas-service' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_client-ut + script : 'm0 run-ut -t client-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_obj-ut + script : 'm0 run-ut -t obj-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_io-ut + script : 'm0 run-ut 
-t io-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_io-nw-xfer-ut + script : 'm0 run-ut -t io-nw-xfer-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_io-pargrp-ut + script : 'm0 run-ut -t io-pargrp-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_io-req-ut + script : 'm0 run-ut -t io-req-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_io-req-fop-ut + script : 'm0 run-ut -t io-req-fop-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_sync-ut + script : 'm0 run-ut -t sync-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_idx-ut + script : 'm0 run-ut -t idx-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_idx-dix + script : 'm0 run-ut -t idx-dix' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : 
/var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_idx-dix-mt + script : 'm0 run-ut -t idx-dix-mt' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_layout-ut + script : 'm0 run-ut -t layout-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_helpers-ufid-ut + script : 'm0 run-ut -t helpers-ufid-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_cm-cp-ut + script : 'm0 run-ut -t cm-cp-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_cm-ut + script : 'm0 run-ut -t cm-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_cob-ut + script : 'm0 run-ut -t cob-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_cob-foms-ut + script : 'm0 run-ut -t cob-foms-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_conf-ut + script : 'm0 
run-ut -t conf-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_conf-load-ut + script : 'm0 run-ut -t conf-load-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_conf-pvers-ut + script : 'm0 run-ut -t conf-pvers-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_confc-ut + script : 'm0 run-ut -t confc-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_conf-glob-ut + script : 'm0 run-ut -t conf-glob-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_conf-diter-ut + script : 'm0 run-ut -t conf-diter-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_confstr-ut + script : 'm0 run-ut -t confstr-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_conf-validation-ut + script : 'm0 run-ut -t conf-validation-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor 
: Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_conf-walk-ut + script : 'm0 run-ut -t conf-walk-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rconfc-ut + script : 'm0 run-ut -t rconfc-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rpc-connection-ut + script : 'm0 run-ut -t rpc-connection-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_dix-client-ut + script : 'm0 run-ut -t dix-client-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_dix-cm-iter + script : 'm0 run-ut -t dix-cm-iter' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_dtm-nucleus-ut + script : 'm0 run-ut -t dtm-nucleus-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_dtm-transmit-ut + script : 'm0 run-ut -t dtm-transmit-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 
01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_dtm-dtx-ut + script : 'm0 run-ut -t dtm-dtx-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_dtm0-clk-src-ut + script : 'm0 run-ut -t dtm0-clk-src-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_dtm0-log-ut + script : 'm0 run-ut -t dtm0-log-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_failure_domains_tree-ut + script : 'm0 run-ut -t failure_domains_tree-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_failure_domains-ut + script : 'm0 run-ut -t failure_domains-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_fis-ut + script : 'm0 run-ut -t fis-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_fdmi-filterc-ut + script : 'm0 run-ut -t fdmi-filterc-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 
00userspace-tests_fdmi-pd-ut + script : 'm0 run-ut -t fdmi-pd-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_fdmi-sd-ut + script : 'm0 run-ut -t fdmi-sd-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_fdmi-fol-ut + script : 'm0 run-ut -t fdmi-fol-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_fdmi-fol-fini-ut + script : 'm0 run-ut -t fdmi-fol-fini-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_fdmi-filter-eval-ut + script : 'm0 run-ut -t fdmi-filter-eval-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_fit-ut + script : 'm0 run-ut -t fit-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_fol-ut + script : 'm0 run-ut -t fol-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_fom-timedwait-ut + script : 'm0 run-ut -t fom-timedwait-ut' + dir : src/scripts + 
executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rpc-formation-ut + script : 'm0 run-ut -t rpc-formation-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_ha-ut + script : 'm0 run-ut -t ha-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_ha-state-ut + script : 'm0 run-ut -t ha-state-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_ios-bufferpool-ut + script : 'm0 run-ut -t ios-bufferpool-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_isc-api-ut + script : 'm0 run-ut -t isc-api-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_isc-service-ut + script : 'm0 run-ut -t isc-service-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rpc-item-ut + script : 'm0 run-ut -t rpc-item-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : 
/var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rpc-item-source-ut + script : 'm0 run-ut -t rpc-item-source-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_layout-ut + script : 'm0 run-ut -t layout-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_layout-access-plan-ut + script : 'm0 run-ut -t layout-access-plan-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rpc-link-ut + script : 'm0 run-ut -t rpc-link-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_fop-lock-ut + script : 'm0 run-ut -t fop-lock-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_fom-stats-ut + script : 'm0 run-ut -t fom-stats-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_net-bulk-if + script : 'm0 run-ut -t net-bulk-if' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 
2400 + + - id : 00userspace-tests_net-bulk-mem + script : 'm0 run-ut -t net-bulk-mem' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_net-lnet + script : 'm0 run-ut -t net-lnet' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_libfab-ut + script : 'm0 run-ut -t libfab-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_net-misc-ut + script : 'm0 run-ut -t net-misc-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_net-module + script : 'm0 run-ut -t net-module' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_net-test + script : 'm0 run-ut -t net-test' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_net-prov-ut + script : 'm0 run-ut -t net-prov-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_m0d-ut + script : 'm0 run-ut -t m0d-ut' + dir : src/scripts + executor : 
Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_mdservice-ut + script : 'm0 run-ut -t mdservice-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_module-ut + script : 'm0 run-ut -t module-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_ms-fom-ut + script : 'm0 run-ut -t ms-fom-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rpc-packet-encdec-ut + script : 'm0 run-ut -t rpc-packet-encdec-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_parity_math-ut + script : 'm0 run-ut -t parity_math-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_parity_math_ssse3-ut + script : 'm0 run-ut -t parity_math_ssse3-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_poolmach-ut + script : 'm0 run-ut -t poolmach-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + 
sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_reqh-ut + script : 'm0 run-ut -t reqh-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_reqh-fop-allow-ut + script : 'm0 run-ut -t reqh-fop-allow-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_reqh-service-ut + script : 'm0 run-ut -t reqh-service-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_reqh-service-ctx-ut + script : 'm0 run-ut -t reqh-service-ctx-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rm-ut + script : 'm0 run-ut -t rm-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rm-rcredits-ut + script : 'm0 run-ut -t rm-rcredits-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rm-rwlock-ut + script : 'm0 run-ut -t rm-rwlock-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 
2400 + + - id : 00userspace-tests_rpc-at + script : 'm0 run-ut -t rpc-at' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rpc-machine-ut + script : 'm0 run-ut -t rpc-machine-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rpc-rcv-session-ut + script : 'm0 run-ut -t rpc-rcv-session-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rpc-lib-ut + script : 'm0 run-ut -t rpc-lib-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rpc-conn-pool-ut + script : 'm0 run-ut -t rpc-conn-pool-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_rpc-session-ut + script : 'm0 run-ut -t rpc-session-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_sm-ut + script : 'm0 run-ut -t sm-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_snscm_xform-ut + script : 'm0 run-ut -t snscm_xform-ut' + dir : 
src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_snscm_storage-ut + script : 'm0 run-ut -t snscm_storage-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_sns-cm-repair-ut + script : 'm0 run-ut -t sns-cm-repair-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_snscm_net-ut + script : 'm0 run-ut -t snscm_net-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_sns-file-lock-ut + script : 'm0 run-ut -t sns-file-lock-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_spiel-ut + script : 'm0 run-ut -t spiel-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_spiel-ci-ut + script : 'm0 run-ut -t spiel-ci-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_sss-ut + script : 'm0 run-ut -t sss-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + 
sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_stats-ut + script : 'm0 run-ut -t stats-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_spiel-conf-ut + script : 'm0 run-ut -t spiel-conf-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_stob-ut + script : 'm0 run-ut -t stob-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_storage-dev-ut + script : 'm0 run-ut -t storage-dev-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_udb-ut + script : 'm0 run-ut -t udb-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_xcode_bufvec_fop-ut + script : 'm0 run-ut -t xcode_bufvec_fop-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_ff2c-ut + script : 'm0 run-ut -t ff2c-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 
00userspace-tests_xcode-ut + script : 'm0 run-ut -t xcode-ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_pi_ut + script : 'm0 run-ut -t pi_ut' + dir : src/scripts + executor : Xperior::Executor::MotrTest + #executor : Xperior::Executor::Skip + sandbox : /var/motr/m0ut + groupname: 01motr-single-node + polltime : 15 + timeout : 2400 + + - id : 00userspace-tests_libconsole-ut + script : 'm0 run-ut -t libconsole-ut' dir : src/scripts executor : Xperior::Executor::MotrTest #executor : Xperior::Executor::Skip diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 05c4eb4f6ee..ce508c10fba 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -175,5 +175,5 @@ Refer to the [Motr Coding Style Guide](../dev/doc/coding-style.md) and the [CORT You can reach out to us with your questions, feedback, and comments through our CORTX Communication Channels: -- Join our CORTX-Open Source Slack Channel to interact with your fellow community members and gets your questions answered. [![Slack Channel](https://img.shields.io/badge/chat-on%20Slack-blue)](https://join.slack.com/t/cortxcommunity/shared_invite/zt-femhm3zm-yiCs5V9NBxh89a_709FFXQ?) +- Join our CORTX-Open Source Slack Channel to interact with community members and gets your questions answered. [![Slack Channel](https://img.shields.io/badge/chat-on%20Slack-blue)](https://join.slack.com/t/cortxcommunity/shared_invite/zt-femhm3zm-yiCs5V9NBxh89a_709FFXQ?) - If you'd like to contact us directly, drop us a mail at cortx-questions@seagate.com. diff --git a/SUPPORT.md b/SUPPORT.md index 579cc597c4b..ade5842bfad 100644 --- a/SUPPORT.md +++ b/SUPPORT.md @@ -3,6 +3,6 @@ Looking for support for CORTX parent or a repository ? 
Consider some of these resources: - Join our CORTX-Open Source Slack channel [![Slack](https://img.shields.io/badge/chat-on%20Slack-blue")](https://join.slack.com/t/cortxcommunity/shared_invite/zt-femhm3zm-yiCs5V9NBxh89a_709FFXQ?) to interact with community members and gets your questions answered. -- Join [GitHub Discussions](https://github.com/Seagate/cortx-motr/discussions) to ask, answer, and discuss topics with your fellow CORTX contributors! +- Join [GitHub Discussions](https://github.com/Seagate/cortx-motr/discussions) to ask, answer, and discuss topics with CORTX contributors! - If you'd like to contact us directly, drop us a mail at [cortx-questions@seagate.com](mailto:cortx-questions@seagate.com) . - We like to highlight the work and contributions of our community members—if you have solved an interesting challenge, or you are interested in sharing your experience or use cases, we want to talk to you! Please email our Community Manager [rachel.novak@seagate.com](mailto:rachel.novak@seagate.com) or [schedule a meeting with us](https://outlook.office365.com/owa/calendar/CORTXCommunity@seagate.com/bookings/s/x8yMn2ODxUCOdhxvXkH4FA2) to share. diff --git a/be/btree.c b/be/btree.c index ec31a8d3bb9..7581f1701f4 100644 --- a/be/btree.c +++ b/be/btree.c @@ -1893,10 +1893,10 @@ M0_INTERNAL void m0_btree_lrulist_set_lru_config(int64_t slow_lru_mem_release, M0_ASSERT(lru_space_wm_high >= lru_space_wm_target && lru_space_wm_target >= lru_space_wm_low); - M0_LOG(M0_INFO, "Btree LRU List Watermarks: Low - %"PRIi64" Mid - " + M0_LOG(M0_NOTICE, "Btree LRU List Watermarks: Low - %"PRIi64" Mid - " "%"PRIi64" High - %"PRIi64" \n", lru_space_wm_low, lru_space_wm_target, lru_space_wm_high); - M0_LOG(M0_INFO, "Btree LRU List trickle release: %s \n", + M0_LOG(M0_NOTICE, "Btree LRU List trickle release: %s \n", lru_trickle_release_en ? 
"true" : "false"); } @@ -2025,7 +2025,6 @@ static int64_t tree_get(struct node_op *op, struct segaddr *addr, int nxt) return nxt; } - /** * Returns the tree to the free tree pool if the reference count for this tree * reaches zero. @@ -8780,7 +8779,7 @@ M0_INTERNAL int64_t m0_btree_lrulist_purge_check(enum m0_btree_purge_user user, if (lru_space_used < lru_space_wm_low) { /** Do nothing. */ if (user == M0_PU_EXTERNAL) - M0_LOG(M0_INFO, "Skipping memory release since used " + M0_LOG(M0_ALWAYS, "Skipping memory release since used " "space is below threshold requested size=%"PRId64 " used space=%"PRId64, size, lru_space_used); lru_trickle_release_mode = false; @@ -8806,7 +8805,7 @@ M0_INTERNAL int64_t m0_btree_lrulist_purge_check(enum m0_btree_purge_user user, purged_size = m0_btree_lrulist_purge(size_to_purge, size_to_purge != 0 ? 0 : M0_BTREE_TRICKLE_NUM_NODES); - M0_LOG(M0_INFO, " Below critical External user Purge," + M0_LOG(M0_ALWAYS, " Below critical External user Purge," " requested size=%"PRId64" used space=%"PRId64 " purged size=%"PRId64, size, lru_space_used, purged_size); @@ -8827,7 +8826,7 @@ M0_INTERNAL int64_t m0_btree_lrulist_purge_check(enum m0_btree_purge_user user, purged_size = m0_btree_lrulist_purge(size_to_purge, (lru_trickle_release_mode && size_to_purge == 0) ? M0_BTREE_TRICKLE_NUM_NODES : 0); - M0_LOG(M0_INFO, " Above critical purge, User=%s requested size=" + M0_LOG(M0_ALWAYS, " Above critical purge, User=%s requested size=" "%"PRId64" used space=%"PRIu64" purged size=" "%"PRIu64, user == M0_PU_BTREE ? 
"btree" : "external", size, lru_space_used, purged_size); diff --git a/cas/ctg_store.c b/cas/ctg_store.c index 0b0b56642d3..4f1c3fb7e80 100644 --- a/cas/ctg_store.c +++ b/cas/ctg_store.c @@ -169,6 +169,10 @@ static int ctg_op_exec (struct m0_ctg_op *ctg_op, int next_phase); static int ctg_meta_exec (struct m0_ctg_op *ctg_op, const struct m0_fid *fid, int next_phase); +static int ctg_dead_exec (struct m0_ctg_op *ctg_op, + struct m0_cas_ctg *ctg, + const struct m0_buf *key, + int next_phase); static int ctg_exec (struct m0_ctg_op *ctg_op, struct m0_cas_ctg *ctg, const struct m0_buf *key, @@ -479,7 +483,7 @@ int m0_ctg_create(struct m0_be_seg *seg, struct m0_be_tx *tx, bt.vsize = sizeof(struct meta_value); break; case CTT_DEADIDX: - bt.ksize = sizeof(struct meta_value); + bt.ksize = sizeof(struct generic_key *) + sizeof(void *); bt.vsize = sizeof(void *); break; case CTT_CTIDX: @@ -966,6 +970,11 @@ static void ctg_store_release(struct m0_ref *ref) M0_ENTRY(); m0_mutex_fini(&ctg_store->cs_state_mutex); + /* TODO: Clean up every index in memory tree allocation upon any CAS + operation on the index */ + ctg_fini(ctg_store->cs_state->cs_meta); + ctg_fini(ctg_store->cs_ctidx); + ctg_fini(ctg_store->cs_dead_index); ctg_store->cs_state = NULL; ctg_store->cs_ctidx = NULL; m0_long_lock_fini(&ctg_store->cs_del_lock); @@ -1382,7 +1391,7 @@ static int ctg_op_exec_normal(struct m0_ctg_op *ctg_op, int next_phase) * m0_be_btree_insert_inplace() have 0 there. 
*/ - vsize = sizeof(struct generic_value); + vsize = sizeof(void *); rec.r_key.k_data = M0_BUFVEC_INIT_BUF(&k_ptr, &ksize); rec.r_val = M0_BUFVEC_INIT_BUF(&v_ptr, &vsize); rec.r_crc_type = M0_BCT_NO_CRC; @@ -1425,6 +1434,8 @@ static int ctg_op_exec_normal(struct m0_ctg_op *ctg_op, int next_phase) &kv_op, tx)); m0_be_op_done(beop); break; + case CTG_OP_COMBINE(CO_DEL, CT_DEAD_INDEX): + ksize = sizeof(struct generic_key *) + sizeof(void *); case CTG_OP_COMBINE(CO_DEL, CT_BTREE): case CTG_OP_COMBINE(CO_DEL, CT_META): m0_be_op_active(beop); @@ -1649,13 +1660,49 @@ M0_INTERNAL int m0_ctg_dead_index_insert(struct m0_ctg_op *ctg_op, struct m0_cas_ctg *ctg, int next_phase) { - ctg_op->co_ctg = m0_ctg_dead_index(); - ctg_op->co_ct = CT_DEAD_INDEX; ctg_op->co_opcode = CO_PUT; /* Dead index value is empty */ ctg_op->co_val = M0_BUF_INIT0; /* Dead index key is a pointer to a catalogue */ - return ctg_exec(ctg_op, ctg, &M0_BUF_INIT_PTR(&ctg), next_phase); + return ctg_dead_exec(ctg_op, ctg, &M0_BUF_INIT_PTR(&ctg), next_phase); +} + +M0_INTERNAL int m0_ctg_dead_delete(struct m0_ctg_op *ctg_op, + struct m0_cas_ctg *ctg, + const struct m0_buf *key, + int next_phase) +{ + M0_PRE(ctg_op != NULL); + M0_PRE(ctg != NULL); + M0_PRE(key != NULL); + M0_PRE(ctg_op->co_beop.bo_sm.sm_state == M0_BOS_INIT); + + ctg_op->co_opcode = CO_DEL; + + return ctg_dead_exec(ctg_op, ctg, key, next_phase); +} + +static int ctg_dead_exec(struct m0_ctg_op *ctg_op, + struct m0_cas_ctg *ctg, + const struct m0_buf *key, + int next_phase) +{ + int ret = M0_FSO_AGAIN; + + ctg_op->co_ctg = m0_ctg_dead_index(); + ctg_op->co_ct = CT_DEAD_INDEX; + + if (!M0_IN(ctg_op->co_opcode, (CO_MIN, CO_TRUNC, CO_DROP)) && + (ctg_op->co_opcode != CO_CUR || + ctg_op->co_cur_phase != CPH_NEXT)) + ctg_op->co_rc = ctg_kbuf_get(&ctg_op->co_key, key, true); + + if (ctg_op->co_rc != 0) + m0_fom_phase_set(ctg_op->co_fom, next_phase); + else + ret = ctg_op_exec(ctg_op, next_phase); + + return ret; } static int ctg_exec(struct m0_ctg_op 
*ctg_op, @@ -1666,7 +1713,7 @@ static int ctg_exec(struct m0_ctg_op *ctg_op, int ret = M0_FSO_AGAIN; ctg_op->co_ctg = ctg; - ctg_op->co_ct = CT_BTREE; + ctg_op->co_ct = CT_BTREE; if (!M0_IN(ctg_op->co_opcode, (CO_MIN, CO_TRUNC, CO_DROP)) && (ctg_op->co_opcode != CO_CUR || diff --git a/cas/ctg_store.h b/cas/ctg_store.h index ab5bb859c04..3b4a80c47ea 100644 --- a/cas/ctg_store.h +++ b/cas/ctg_store.h @@ -390,6 +390,18 @@ M0_INTERNAL int m0_ctg_dead_index_insert(struct m0_ctg_op *ctg_op, struct m0_cas_ctg *ctg, int next_phase); +/** + * Deletes 'ctg' from "dead index" catalogue. + * + * @param ctg_op Catalogue operation context. + * @param ctg Catalogue to be deleted from "dead index" catalogue. + * @param next_phase Next phase of caller FOM. + */ +M0_INTERNAL int m0_ctg_dead_delete(struct m0_ctg_op *ctg_op, + struct m0_cas_ctg *ctg, + const struct m0_buf *key, + int next_phase); + /** * Looks up a catalogue in meta catalogue. * diff --git a/cas/index_gc.c b/cas/index_gc.c index 2ed530ba046..2d1df6c05c9 100644 --- a/cas/index_gc.c +++ b/cas/index_gc.c @@ -361,6 +361,12 @@ static int cgc_fom_tick(struct m0_fom *fom0) fom->cg_ctg_op_initialized = true; result = m0_ctg_drop(ctg_op, fom->cg_ctg, CGC_LOCK_DEAD_INDEX); + /* + * Free the memory allocated for the root node after + * destroying the tree. + */ + if (result == M0_FSO_AGAIN) + m0_free0(&fom->cg_ctg->cc_tree); } else { M0_LOG(M0_DEBUG, "out of credits, commit & restart"); m0_long_unlock(m0_ctg_lock(m0_ctg_dead_index()), @@ -387,14 +393,13 @@ static int cgc_fom_tick(struct m0_fom *fom0) m0_ctg_op_fini(ctg_op); m0_ctg_op_init(ctg_op, fom0, 0); fom->cg_ctg_op_initialized = true; - fom->cg_ctg_key = M0_BUF_INIT(M0_CAS_CTG_KEY_HDR_SIZE, - &fom->cg_ctg); + fom->cg_ctg_key = M0_BUF_INIT_PTR(&fom->cg_ctg); /* * Now completely forget this ctg by deleting its descriptor * from "dead index" catalogue. 
*/ - result = m0_ctg_delete(ctg_op, m0_ctg_dead_index(), - &fom->cg_ctg_key, CGC_SUCCESS); + result = m0_ctg_dead_delete(ctg_op, m0_ctg_dead_index(), + &fom->cg_ctg_key, CGC_SUCCESS); break; case CGC_SUCCESS: m0_long_unlock(m0_ctg_lock(m0_ctg_dead_index()), diff --git a/cas/service.c b/cas/service.c index 6558af0ef4d..5489a4edaf1 100644 --- a/cas/service.c +++ b/cas/service.c @@ -1151,6 +1151,22 @@ static int cas_dtm0_logrec_add(struct m0_fom *fom0, int i; int rc; + /* + * It is impossible to commit a transaction without DTM0 service up and + * running. + */ + if (dtms == NULL) { + static uint32_t count = 0; + if (count == 0) { + M0_LOG(M0_FATAL, "DTM is enabled but is not " + "configured in conf. Skip " + "DTM now. Please Check!"); + count++; /* Only print the message at the first time. */ + } + return 0; /* FIXME but now let's skip it if no DTM service. */ + } + M0_ASSERT(dtms != NULL); + for (i = 0; i < msg->dtd_ps.dtp_nr; ++i) { if (m0_fid_eq(&msg->dtd_ps.dtp_pa[i].p_fid, &dtms->dos_generic.rs_service_fid)) { diff --git a/cas/ut/client_ut.c b/cas/ut/client_ut.c index 3194c4c30dd..235650c3773 100644 --- a/cas/ut/client_ut.c +++ b/cas/ut/client_ut.c @@ -89,6 +89,8 @@ static char *cas_startup_cmd[] = { "-w", "10", "-F", "-f", M0_UT_CONF_PROCESS, "-c", M0_SRC_PATH("cas/ut/conf.xc") + /* FIXME If DTM is enabled, the above conf.xc must be updated to include + * DTM0 services. 
*/ }; static const char *cdbnames[] = { "cas1" }; diff --git a/cas/ut/service_ut.c b/cas/ut/service_ut.c index 66c08723595..2627a4154f9 100644 --- a/cas/ut/service_ut.c +++ b/cas/ut/service_ut.c @@ -491,8 +491,10 @@ static void meta_fop_submit(struct m0_fop_type *fopt, fop_submit(fopt, &m0_cas_meta_fid, recs); - for (i = 0; i < meta_recs_num; i++) + for (i = 0; i < meta_recs_num; i++) { m0_rpc_at_fini(&recs[i].cr_key); + m0_free(recs[i].cr_key.u.ab_buf.b_addr); + } m0_free(recs); } @@ -592,6 +594,9 @@ static void create(void) init(); meta_fid_submit(&cas_put_fopt, &ifid); M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -624,6 +629,11 @@ static void cctg_create(void) meta_cid_submit(&cas_put_fopt, &cid2); M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); m0_dix_ldesc_fini(desc); + /* Cleaning up allocated memory to avoid leaks. */ + meta_cid_submit(&cas_del_fopt, &cid1); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); + meta_cid_submit(&cas_del_fopt, &cid2); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -652,6 +662,9 @@ static void cctg_create_lookup(void) meta_cid_submit(&cas_get_fopt, &cid); M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); m0_dix_ldesc_fini(desc); + /* Cleaning up allocated memory to avoid leaks. */ + meta_cid_submit(&cas_del_fopt, &cid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -705,6 +718,9 @@ static void create_lookup(void) M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); meta_fid_submit(&cas_get_fopt, &ifid); M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); + /* Cleaning up allocated memory to avoid leaks. 
*/ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -720,6 +736,9 @@ static void create_create(void) M0_UT_ASSERT(rep_check(0, -EEXIST, BUNSET, BUNSET)); meta_fid_submit(&cas_get_fopt, &ifid); M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -754,6 +773,9 @@ static void recreate(void) M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); meta_fid_submit(&cas_get_fopt, &ifid); M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -775,6 +797,9 @@ static void meta_cur_1(void) M0_UT_ASSERT(rep.cgr_rep.cr_nr == 1); M0_UT_ASSERT(rep_check(0, 1, BSET, BUNSET)); M0_UT_ASSERT(m0_fid_eq(repv[0].cr_key.u.ab_buf.b_addr, &ifid)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -797,6 +822,9 @@ static void meta_cur_eot(void) M0_UT_ASSERT(rep_check(0, 1, BSET, BUNSET)); M0_UT_ASSERT(rep_check(1, -ENOENT, BUNSET, BUNSET)); M0_UT_ASSERT(m0_fid_eq(repv[0].cr_key.u.ab_buf.b_addr, &ifid)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -816,6 +844,9 @@ static void meta_cur_0(void) 1); M0_UT_ASSERT(rep.cgr_rc == 0); M0_UT_ASSERT(rep.cgr_rep.cr_nr == 0); + /* Cleaning up allocated memory to avoid leaks. 
*/ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -857,6 +888,9 @@ static void meta_cur_none(void) M0_UT_ASSERT(rep_check(1, 0, BUNSET, BUNSET)); M0_UT_ASSERT(rep_check(2, 0, BUNSET, BUNSET)); M0_UT_ASSERT(rep_check(3, 0, BUNSET, BUNSET)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -894,6 +928,9 @@ static void meta_cur_all(void) M0_UT_ASSERT(m0_fid_eq(repv[2].cr_key.u.ab_buf.b_addr, &m0_cas_dead_index_fid)); M0_UT_ASSERT(m0_fid_eq(repv[3].cr_key.u.ab_buf.b_addr, &fid)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &fid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1020,6 +1057,9 @@ static void insert(void) M0_UT_ASSERT(rep.cgr_rc == 0); M0_UT_ASSERT(rep.cgr_rep.cr_nr == 1); M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1043,6 +1083,9 @@ static void insert_lookup(void) == sizeof (uint64_t)); M0_UT_ASSERT(2 == *(uint64_t *)rep.cgr_rep.cr_rec[0].cr_val.u.ab_buf.b_addr); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1061,6 +1104,9 @@ static void insert_delete(void) M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); index_op(&cas_get_fopt, &ifid, 1, NOVAL); M0_UT_ASSERT(rep_check(0, -ENOENT, BUNSET, BUNSET)); + /* Cleaning up allocated memory to avoid leaks. 
*/ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1075,6 +1121,10 @@ static void lookup_none(void) index_op(&cas_put_fopt, &ifid, 1, 2); index_op(&cas_get_fopt, &ifid, 3, NOVAL); M0_UT_ASSERT(rep_check(0, -ENOENT, BUNSET, BUNSET)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); + fini(); } @@ -1101,6 +1151,9 @@ static void empty_value(void) M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); index_op(&cas_get_fopt, &ifid, 1, NOVAL); M0_UT_ASSERT(rep_check(0, -ENOENT, BUNSET, BUNSET)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1118,6 +1171,10 @@ static void insert_2(void) M0_UT_ASSERT(rep_check(0, -EEXIST, BUNSET, BUNSET)); index_op(&cas_get_fopt, &ifid, 1, NOVAL); M0_UT_ASSERT(rep_check(0, 0, BUNSET, BSET)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); + fini(); } @@ -1131,6 +1188,9 @@ static void delete_2(void) M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); index_op(&cas_del_fopt, &ifid, 1, NOVAL); M0_UT_ASSERT(rep_check(0, -ENOENT, BUNSET, BUNSET)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1182,6 +1242,9 @@ static void lookup_N(void) meta_fid_submit(&cas_put_fopt, &ifid); insert_odd(&ifid); lookup_all(&ifid); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1213,6 +1276,9 @@ static void lookup_restart(void) m0_cas__ut_svc_be_set(cas, &be.but_dom); m0_reqh_service_start(cas); lookup_all(&ifid); + /* Cleaning up allocated memory to avoid leaks. 
*/ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1268,6 +1334,9 @@ static void cur_N(void) M0_UT_ASSERT(rep_check(k, -ENOENT, BUNSET, BUNSET)); M0_UT_ASSERT(rep.cgr_rep.cr_nr == INSERTS); } + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1340,6 +1409,9 @@ static void meta_lookup_fail(void) /* Lookup without ENOMEM returns record. */ meta_fid_submit(&cas_get_fopt, &ifid); M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1355,6 +1427,9 @@ static void meta_delete_fail(void) /* Lookup should return record. */ meta_fid_submit(&cas_get_fopt, &ifid); M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1379,6 +1454,9 @@ static void insert_fail(void) M0_UT_ASSERT(rep.cgr_rep.cr_nr == 1); M0_UT_ASSERT(rep.cgr_rep.cr_rec[0].cr_rc == -ENOENT); M0_UT_ASSERT(rep.cgr_rep.cr_rec[0].cr_val.u.ab_buf.b_addr == NULL); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1409,6 +1487,9 @@ static void lookup_fail(void) M0_UT_ASSERT(rep.cgr_rc == 0); M0_UT_ASSERT(repv[0].cr_val.u.ab_buf.b_nob == sizeof (uint64_t)); M0_UT_ASSERT(*(uint64_t *)repv[0].cr_val.u.ab_buf.b_addr == 2); + /* Cleaning up allocated memory to avoid leaks. 
*/ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1450,6 +1531,9 @@ static void delete_fail(void) M0_UT_ASSERT(rep.cgr_rep.cr_nr == 1); M0_UT_ASSERT(rep.cgr_rep.cr_rec[0].cr_rc == -ENOENT); M0_UT_ASSERT(rep.cgr_rep.cr_rec[0].cr_val.u.ab_buf.b_addr == NULL); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1523,6 +1607,9 @@ static void cur_fail(void) for (i = 2; i < MULTI_INS - 1; i++) M0_UT_ASSERT(repv[i].cr_rc == 0); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1590,6 +1677,9 @@ static void multi_insert(void) M0_UT_ASSERT(rep.cgr_rep.cr_nr == MULTI_INS - 1); M0_UT_ASSERT(m0_forall(i, MULTI_INS - 1, rep.cgr_rep.cr_rec[i].cr_rc == 0)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1619,6 +1709,9 @@ static void multi_lookup(void) rep.cgr_rep.cr_rec[i].cr_rc == 0)); M0_UT_ASSERT(m0_forall(i, MULTI_INS - 1, *(uint64_t *)repv[i].cr_val.u.ab_buf.b_addr == i * i)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1654,6 +1747,9 @@ static void multi_delete(void) rep.cgr_rep.cr_rec[i].cr_rc == -ENOENT)); M0_UT_ASSERT(m0_forall(i, MULTI_INS - 1, repv[i].cr_val.u.ab_buf.b_addr == NULL)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1679,6 +1775,9 @@ static void multi_insert_fail(void) i % 2 ? rep.cgr_rep.cr_rec[i].cr_rc == 0 : rep.cgr_rep.cr_rec[i].cr_rc == -ENOMEM)); + /* Cleaning up allocated memory to avoid leaks. 
*/ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1714,6 +1813,9 @@ static void multi_lookup_fail(void) i % 2 ? *(uint64_t *)repv[i].cr_val.u.ab_buf.b_addr == i*i : repv[i].cr_val.u.ab_buf.b_addr == NULL)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1757,6 +1859,9 @@ static void multi_delete_fail(void) i % 2 ? repv[i].cr_val.u.ab_buf.b_addr == NULL : *(uint64_t *)repv[i].cr_val.u.ab_buf.b_addr == i * i)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fid_submit(&cas_del_fopt, &ifid); + M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); fini(); } @@ -1881,6 +1986,14 @@ static void multi_create_drop(void) 2); M0_UT_ASSERT(rep_check(0, 0, BUNSET, BUNSET)); M0_UT_ASSERT(rep_check(1, 0, BUNSET, BUNSET)); + /* Cleaning up allocated memory to avoid leaks. */ + meta_fop_submit(&cas_del_fopt, + (struct meta_rec[]) { + { .cid = nonce0 }, + { .cid = nonce1 } }, + 2); + M0_UT_ASSERT(rep.cgr_rc == 0); + M0_UT_ASSERT(rep.cgr_rep.cr_nr == 2); fini(); } diff --git a/dix/req.c b/dix/req.c index f5cb4b113fd..9ef7ba7df77 100644 --- a/dix/req.c +++ b/dix/req.c @@ -46,6 +46,7 @@ #include "dix/fid_convert.h" #include "dix/dix_addb.h" #include "dtm0/dtx.h" /* m0_dtx0_* API */ +#include "motr/idx.h" /* M0_DIX_MIN_REPLICA_QUORUM */ static struct m0_sm_state_descr dix_req_states[] = { [DIXREQ_INIT] = { @@ -209,11 +210,15 @@ M0_INTERNAL int m0_dix_req_wait(struct m0_dix_req *req, uint64_t states, static void dix_req_init(struct m0_dix_req *req, struct m0_dix_cli *cli, struct m0_sm_group *grp, + int64_t min_success, bool meta) { + M0_PRE(ergo(min_success < 1, + min_success == M0_DIX_MIN_REPLICA_QUORUM)); M0_SET0(req); req->dr_cli = cli; req->dr_is_meta = meta; + req->dr_min_success = min_success; m0_sm_init(&req->dr_sm, &dix_req_sm_conf, DIXREQ_INIT, grp); m0_sm_addb2_counter_init(&req->dr_sm); } @@ -222,14 +227,15 
@@ M0_INTERNAL void m0_dix_mreq_init(struct m0_dix_req *req, struct m0_dix_cli *cli, struct m0_sm_group *grp) { - dix_req_init(req, cli, grp, true); + dix_req_init(req, cli, grp, 1, true); } M0_INTERNAL void m0_dix_req_init(struct m0_dix_req *req, struct m0_dix_cli *cli, - struct m0_sm_group *grp) + struct m0_sm_group *grp, + int64_t min_success) { - dix_req_init(req, cli, grp, false); + dix_req_init(req, cli, grp, min_success, false); } static enum m0_dix_req_state dix_req_state(const struct m0_dix_req *req) @@ -1304,12 +1310,13 @@ static int dix_rop_ctx_init(struct m0_dix_req *req, const struct m0_bufvec *keys, uint64_t *indices) { - struct m0_dix *dix = &req->dr_indices[0]; - struct m0_dix_ldesc *ldesc; - uint32_t keys_nr; - struct m0_buf key; - uint32_t i; - int rc = 0; + struct m0_dix *dix = &req->dr_indices[0]; + struct m0_dix_ldesc *ldesc; + struct m0_pool_version *pver; + uint32_t keys_nr; + struct m0_buf key; + uint32_t i; + int rc = 0; M0_ENTRY(); M0_PRE(M0_IS0(rop)); @@ -1320,6 +1327,13 @@ static int dix_rop_ctx_init(struct m0_dix_req *req, M0_PRE(keys_nr != 0); ldesc = &dix->dd_layout.u.dl_desc; rop->dg_pver = dix_pver_find(req, &ldesc->ld_pver); + M0_ASSERT(ergo(req->dr_min_success < 1, + req->dr_min_success == M0_DIX_MIN_REPLICA_QUORUM)); + if (req->dr_min_success == M0_DIX_MIN_REPLICA_QUORUM) { + pver = m0_dix_pver(req->dr_cli, &req->dr_indices[0]); + req->dr_min_success = (pver->pv_attr.pa_N + + pver->pv_attr.pa_K)/2 + 1; + } M0_ALLOC_ARR(rop->dg_rec_ops, keys_nr); M0_ALLOC_ARR(rop->dg_target_rop, rop->dg_pver->pv_attr.pa_P); if (rop->dg_rec_ops == NULL || rop->dg_target_rop == NULL) @@ -1402,6 +1416,20 @@ static void dix_rop(struct m0_dix_req *req) M0_LEAVE(); } +/** Checks if the given cas get reply has a newer version of the value */ +static int dix_item_version_cmp(const struct m0_dix_item *ditem, + const struct m0_cas_get_reply *get_rep) { + /* + * TODO: once cas versions are propagated, check if the get reply + * has a newer version than seen 
previously. Will need to add + * version info to struct m0_dix_item. This function should return + * true if no previous value is set, or if the previous value has + * an older version. For now, always return true so the last + * reply in the array wins. + */ + return -1; +} + static void dix_item_rc_update(struct m0_dix_req *req, struct m0_cas_req *creq, uint64_t key_idx, @@ -1418,7 +1446,8 @@ static void dix_item_rc_update(struct m0_dix_req *req, case DIX_GET: m0_cas_get_rep(creq, key_idx, &get_rep); rc = get_rep.cge_rc; - if (rc == 0) { + if (rc == 0 && dix_item_version_cmp(ditem, &get_rep) < 0) { + m0_buf_free(&ditem->dxi_val); ditem->dxi_val = get_rep.cge_val; /* Value will be freed at m0_dix_req_fini(). */ m0_cas_rep_mlock(creq, key_idx); @@ -1620,29 +1649,61 @@ static void dix_cas_rop_rc_update(struct m0_dix_cas_rop *cas_rop, int rc) static void dix_rop_completed(struct m0_sm_group *grp, struct m0_sm_ast *ast) { - struct m0_dix_req *req = ast->sa_datum; - struct m0_dix_rop_ctx *rop = req->dr_rop; - struct m0_dix_rop_ctx *rop_del_phase2 = NULL; - bool del_phase2 = false; - struct m0_dix_cas_rop *cas_rop; + struct m0_dix_req *req = ast->sa_datum; + struct m0_dix_rop_ctx *rop = req->dr_rop; + struct m0_dix_rop_ctx *rop_del_phase2 = NULL; + bool del_phase2 = false; + struct m0_dix_cas_rop *cas_rop; + int64_t min_success; + int64_t successful_ops = 0; (void)grp; if (req->dr_type == DIX_NEXT) m0_dix_next_result_prepare(req); else { + min_success = req->dr_min_success; + M0_ASSERT(min_success > 0); + + successful_ops = m0_tl_reduce(cas_rop, scan, &rop->dg_cas_reqs, 0, + + !!(scan->crp_creq.ccr_sm.sm_rc == 0)); + /* - * Consider DIX request to be successful if there is at least - * one successful CAS request. + * The idea here is that transient failures are likely to + * occur and may not persist long enough that the node gets + * marked as failed. 
These will still affect individual + * operations, so we need to make sure that dix correctly + * handles the issues (if possible) or returns a failure to + * the client. We therefore let the user choose min_success, + * which determines the minimum number of successful cas + * operations to consider the parent dix operation successful. + * This is necessary to ensure read-after-write consistency. + * If min_success is set to (N+K)/2 + 1 for both reads and + * writes, then even in the presence of transient failures at + * least one copy of the most recent version of data will be + * found. Other values can be set for reduced consistency or + * balancing read vs. write. + * + * Here we compare the previously computed successful_ops + * and min_success to decide if we can ignore failed cas + * operations. If successful_ops >= min_success, we've met + * the quorum requirement and can ignore failures. This is + * done by skipping dix_cas_rop_rc_update for failed cas + * operations. We're guaranteed to have at least one + * successful cas op somewhere in the list, so this results + * in the parent dix operation being considered a success, + * and cas version is used to break ties between multiple + * successful replies (see dix_item_version_cmp). In the + * case that successful_ops < min_success, we call + * dix_cas_rop_rc_update for every cas op, with the result + * that the failed operations will cause the parent dix op + * to fail. Since min_success must be greater than 0, this + * covers the case that all cas requests fail. 
*/ - if (m0_tl_forall(cas_rop, cas_rop, - &rop->dg_cas_reqs, - cas_rop->crp_creq.ccr_sm.sm_rc != 0)) - dix_cas_rop_rc_update(cas_rop_tlist_tail( - &rop->dg_cas_reqs), 0); - m0_tl_for (cas_rop, &rop->dg_cas_reqs, cas_rop) { - if (cas_rop->crp_creq.ccr_sm.sm_rc == 0) + if (successful_ops < min_success || + cas_rop->crp_creq.ccr_sm.sm_rc == 0) { dix_cas_rop_rc_update(cas_rop, 0); + } m0_cas_req_fini(&cas_rop->crp_creq); } m0_tl_endfor; } @@ -2137,10 +2198,11 @@ static void dix_rop_units_set(struct m0_dix_req *req) m0_rwlock_read_unlock(&pm->pm_lock); /* - * Only one CAS GET request should be sent for every record. + * For meta requests, + * only one CAS GET request should be sent for every record. * Choose the best destination for every record. */ - if (req->dr_type == DIX_GET) { + if (req->dr_type == DIX_GET && req->dr_is_meta) { for (i = 0; i < rop->dg_rec_ops_nr; i++) dix_online_unit_choose(req, &rop->dg_rec_ops[i]); } diff --git a/dix/req.h b/dix/req.h index 4eeae2b5b81..3e05f8f0d9e 100644 --- a/dix/req.h +++ b/dix/req.h @@ -253,6 +253,11 @@ struct m0_dix_req { * starting key in DIX_NEXT request. */ uint32_t *dr_recs_nr; + /** + * Minimum number of successful CAS operations to treat + * parent DIX operation as successful. + */ + int64_t dr_min_success; /** Request flags bitmask of m0_cas_op_flags values. */ uint32_t dr_flags; @@ -283,7 +288,8 @@ struct m0_dix_next_reply { /** Initialises DIX request. */ M0_INTERNAL void m0_dix_req_init(struct m0_dix_req *req, struct m0_dix_cli *cli, - struct m0_sm_group *grp); + struct m0_sm_group *grp, + int64_t min_success); /** * Initialises DIX request operating with meta-indices. 
diff --git a/dix/ut/client_ut.c b/dix/ut/client_ut.c index 81470106e48..e5a9e8a3be1 100644 --- a/dix/ut/client_ut.c +++ b/dix/ut/client_ut.c @@ -1244,7 +1244,7 @@ static int dix_common_idx_flagged_op(const struct m0_dix *indices, int rc; int i; - m0_dix_req_init(&req, &dix_ut_cctx.cl_cli, dix_ut_cctx.cl_grp); + m0_dix_req_init(&req, &dix_ut_cctx.cl_cli, dix_ut_cctx.cl_grp, 1); m0_dix_req_lock(&req); switch (type) { case REQ_CREATE: @@ -1285,6 +1285,7 @@ static int dix_common_rec_op(const struct m0_dix *index, const struct m0_bufvec *keys, struct m0_bufvec *vals, const uint32_t *recs_nr, + int64_t min_success, uint32_t flags, struct dix_rep_arr *rep, enum ut_dix_req_type type) @@ -1294,7 +1295,7 @@ static int dix_common_rec_op(const struct m0_dix *index, int i; int k = 0; - m0_dix_req_init(&req, &dix_ut_cctx.cl_cli, dix_ut_cctx.cl_grp); + m0_dix_req_init(&req, &dix_ut_cctx.cl_cli, dix_ut_cctx.cl_grp, min_success); m0_dix_req_lock(&req); switch (type) { case REQ_PUT: @@ -1397,21 +1398,31 @@ static int dix_ut_put(const struct m0_dix *index, uint32_t flags, struct dix_rep_arr *rep) { - return dix_common_rec_op(index, keys, vals, NULL, flags, rep, REQ_PUT); + return dix_common_rec_op(index, keys, vals, NULL, 1, flags, rep, REQ_PUT); +} + +static int dix_ut_put_min_success(const struct m0_dix *index, + const struct m0_bufvec *keys, + struct m0_bufvec *vals, + int64_t min_success, + uint32_t flags, + struct dix_rep_arr *rep) +{ + return dix_common_rec_op(index, keys, vals, NULL, min_success, flags, rep, REQ_PUT); } static int dix_ut_get(const struct m0_dix *index, const struct m0_bufvec *keys, struct dix_rep_arr *rep) { - return dix_common_rec_op(index, keys, NULL, NULL, 0, rep, REQ_GET); + return dix_common_rec_op(index, keys, NULL, NULL, 1, 0, rep, REQ_GET); } static int dix_ut_del(const struct m0_dix *index, const struct m0_bufvec *keys, struct dix_rep_arr *rep) { - return dix_common_rec_op(index, keys, NULL, NULL, 0, rep, REQ_DEL); + return dix_common_rec_op(index, 
keys, NULL, NULL, 1, 0, rep, REQ_DEL); } static int dix_ut_next(const struct m0_dix *index, @@ -1420,7 +1431,7 @@ static int dix_ut_next(const struct m0_dix *index, uint32_t flags, struct dix_rep_arr *rep) { - return dix_common_rec_op(index, start_keys, NULL, recs_nr, flags, + return dix_common_rec_op(index, start_keys, NULL, recs_nr, 1, flags, rep, REQ_NEXT); } @@ -2655,17 +2666,35 @@ static void local_failures(void) dix_kv_alloc_and_fill(&keys, &vals, COUNT); rc = dix_common_idx_op(&index, 1, REQ_CREATE); M0_UT_ASSERT(rc == 0); + /* - * Consider DIX request to be successful if there is at least - * one successful CAS request. Here two cas requests can be - * sent successfully. + * Consider DIX request to be successful only if there are + * enough successful CAS requests to satisfy min_success. + * Here two cas requests can be sent successfully. First, try with + * min_success = 3, which should result in all CAS requests failing. */ m0_fi_enable_off_n_on_m("cas_req_replied_cb", "send-failure", 2, 3); - rc = dix_ut_put(&index, &keys, &vals, 0, &rep); + rc = dix_ut_put_min_success(&index, &keys, &vals, 3, 0, &rep); + m0_fi_disable("cas_req_replied_cb", "send-failure"); + M0_UT_ASSERT(rc == 0); + M0_UT_ASSERT(rep.dra_nr == COUNT); + M0_UT_ASSERT(m0_forall(i, COUNT, rep.dra_rep[i].dre_rc == -ENOTCONN)); + + dix_rep_free(&rep); + rc = dix_ut_del(&index, &keys, &rep); + M0_UT_ASSERT(rc == 0); + dix_rep_free(&rep); + + /* + * Now try again with min_success = 2, which should succeed. 
+ */ + m0_fi_enable_off_n_on_m("cas_req_replied_cb", "send-failure", 2, 3); + rc = dix_ut_put_min_success(&index, &keys, &vals, 2, 0, &rep); m0_fi_disable("cas_req_replied_cb", "send-failure"); M0_UT_ASSERT(rc == 0); M0_UT_ASSERT(rep.dra_nr == COUNT); M0_UT_ASSERT(m0_forall(i, COUNT, rep.dra_rep[i].dre_rc == 0)); + dix_rep_free(&rep); dix_kv_destroy(&keys, &vals); dix_index_fini(&index); diff --git a/doc/CORTX-MOTR-ARCHITECTURE.md b/doc/CORTX-MOTR-ARCHITECTURE.md index 059468f208b..e9014fb22f9 100644 --- a/doc/CORTX-MOTR-ARCHITECTURE.md +++ b/doc/CORTX-MOTR-ARCHITECTURE.md @@ -75,7 +75,7 @@ # Object Layout # + Object is an array of blocks. Arbitrary scatter-gather IO with overwrite. Object has layout. + Default layout is parity de-clustered network raid: N+K+S striping. - + More details about [parity declustering](doc/pdclust/index.rst) + + More details about [parity declustering](pdclust/index.rst) + Layout takes hardware topology into account: distribute units to support fault-tolerance. ![image](./Images/6_Object_Layout.png) diff --git a/doc/HLD-Data-Integrity.md b/doc/HLD-Data-Integrity.md new file mode 100644 index 00000000000..f3d94f691f4 --- /dev/null +++ b/doc/HLD-Data-Integrity.md @@ -0,0 +1,145 @@ +# HLD of Data Integrity + +- I. Motr Client + - I.1 Application and motr data structure + - I.2 Parity Group Computation + - I.3 Tracking Data Unit Allocated to Object + - I.4 FOP Sending and Checksum Processing + - Write Path + - Read Path +- II. Motr Server Write Path + - II.1 Global Object => Component Object + - II.2 Balloc Processing + - Balloc extent and buffer extent processing + - II.3 EMAP Extent Processing + - II.4 COB-EMAP Details + - II.5 Checksum storage with EMAP Extent + +This document will give details of DI implementation in Motr + +## I. 
Motr Client +### I.1 Application and motr data structure +Application sends data as scatter gather list (SGL) of buffers (ioo_data), it also sends an index-list for object offset corresponding to the buffer (ioo_ext). There can be multiple send requests for reading/writing to the same object + +The example below describes scenario where application sends second request to motr for the same object. + +- Parity Stripe having N (Data Units) = 4; K (Parity Units) = 2; S (Spare Units) = 0 + +- Application buffer size 16KB + +- Unit Size (US) = 1MB + +- Motr Default Page Size (PS) = 4KB + +- Previous request has processed Data Unit 0-7 (DU) or Parity Group 0 (PG) & PG 1 + + - Current IO is for DU7-15 or PG 2 & PG 3 + +![image](./Images/DI01.png) +
Received from application
+ +### I.2 Parity Group Computation +- Motr client computes the number of parity groups in the request (ioo_iomap_nr) + +- Allocates data structure for all data(N) and parity units (K) + +- Populates parity group data structure for further processing (ioo_iomaps) + +- Data is allocated on a page or segment (4K) basis. + +![image](./Images/DI02.png) +Parity Group Data Structure
+ +### I.3 Tracking Data Unit Allocated to Object +For DI computation, an array (ti_goff_ivec) for each target is allocated to track the global offset of each segment. + +![image](./Images/DI03.png) +Mapping Data and Parity to Global Offset Space
+ +![image](./Images/DI04.png) +### I.4 FOP Sending and Checksum Processing +During FOP processing based on the DU goff which is added to the target structure (ti_goff_ivec), Parity Group Index and Data Unit Index are computed and stored in structure/array of FOP (irf_cksum_data) + +### Write Path + During write path the checksum for data also gets computed for each DU which is added to the FOP. Checksum computation is seeded with DU Index. + +![image](./Images/DI05.png) +### Read Path +During read path when the data is received from Motr Server, the checksum is computed and compared against the received checksum + + + +![image](./Images/DI06.png) +## II. Motr Server Write Path +### II.1 Global Object => Component Object +Every Motr object is identified by FID also known as Global Object FID and its Stripe Units on devices are identified as Component Object FID. + +Component Object FID is derived from Global Object FID by adding Device ID to the Global Object FID. + +```c +// Logical representation +cob_fid = (gob_fid | device_id << M0_FID_DEVICE_ID_OFFSET) +``` +Every device on which stripe/shard of object is present will have COB entry. + +### II.2 Balloc Processing +Motr client sends data buffer, checksum buffer using RPC to server. + +- Motr server requests blocks from the balloc module to cover the total size of data buffer sent by client + +- Balloc will attempt to allocate total size as one extent + + - If one chunk is not available then multiple balloc extents can be allocated + + - Currently more than one chunk will cause failure + +- In the diagram below it is shown that three balloc extents are getting allocated for two data DUs. + +### Balloc extent and buffer extent processing +As part of balloc processing, server code finds the number of contiguous fragments using overlap of balloc-extent and buffer extent. Also data structure is populated to track this.
+ +- m0_bufvec si_user : Tracking buffer fragment + +- m0_indexvec si_stob : Tracking balloc fragment + +![image](./Images/DI07.png) +Balloc Processing and Fragment Computation
+ +These balloc-extents along with their buffers form units for Storage IO. + +![image](./Images/DI08.png) +### II.3 EMAP Extent Processing +As part of EMAP extent processing, contiguous fragment is computed using overlap of Object offset extent (COB Offset) and balloc extent. This EMAP fragment data is processed later and gets written to the device EMAP btree. + +EMAP Fragment Data consists of the following important fields + +- COB Offset Extent + + - e_start + + - e_end + +- Balloc Extent Start + + - ee_val + +![image](./Images/DI09.png) +### II.4 COB-EMAP Details +- When COB is created a default entry for the object extent is created + + - Fake extent with a span of 0 to ∞ + +- If an entry at start gets added then it cuts into this Fake extent and creates two segments + + - New Entry. + + - Fake extent gets right shifted. + +![image](./Images/DI10.png) +Using above concepts the three EMAP extents get added to EMAP metadata btree. + +### II.5 Checksum storage with EMAP Extent +Checksum for all the DUs which are starting in a Balloc extent, gets added to that corresponding EMAP entry. During EMAP processing checksum gets correctly transferred to the extent and gets written in btree node. +![image](./Images/DI11.png) + + + diff --git a/doc/HLD-of-SNS-Repair.md b/doc/HLD-of-SNS-Repair.md index d6a1e2bf07b..748184fa0a6 100644 --- a/doc/HLD-of-SNS-Repair.md +++ b/doc/HLD-of-SNS-Repair.md @@ -89,7 +89,7 @@ Following topics deserve attention: * Details of interaction between repair and DTM must be specified. * Redundancy other than N+1 (N+K, K > 1) must be regarded as a default configuration. * Multiple failures and repair in the presence of multiple failures must be considered systematically. -* Repair and re-balancing must be clearly distinguished. +* Repair and re-balancing must be distinguished appropriately. * Reclaim of a distributed spare space must be addressed (this is done in a separate Distributed Spare design documentation). * locking optimizations.
@@ -150,7 +150,7 @@ Agent iterates components over the affected container or all the containers whic ### 5.11. SNS repair and layout ### The SNS manager gets an input set configuration and output set configuration as the repair is initiated. These input/output sets can be described by some form of layout. The SNS repair will read the data/parity from the devices described with the input set and reconstruct the missing data. In the process of reconstruction object layouts affected by the data reconstruction (layouts with data located on the lost storage device or node) are transactionally updated to reflect changed data placement. Additionally, while the reconstruction is in-progress, all affected layouts are switched into a degraded mode so that the clients can continue to access and modify data. -Note that the standard mode of operation is a so-called "non-blocking availability" (NBA) where after a failure the client can immediately continue writing new data without any IO degradation. To this end, a client is handed out a new layout to which it can write. After this point, the cluster-wide object has a composite layout: some parts of the object's linear name-space are laid accordingly to the old layout, and other parts (ones where clients write after a failure)—are a new one. In this configuration, clients never write to the old layout, while its content is being reconstructed. +Note that the standard mode of operation is a so-called "non-blocking availability" (NBA) where after a failure the client can immediately continue writing new data without any IO degradation. To this end, a client is handed out a new layout to which it can write. After this point, the cluster-wide object has a composite layout: some parts of the object's linear name-space are mapped accordingly to the old layout, and other parts (ones where clients write after a failure)—are a new one. In this configuration, clients never write to the old layout, while its content is being reconstructed. 
The situation where there is a client-originated IO against layouts being reconstructed is possible because of: * Reads have to access old data even under NBA policy and diff --git a/doc/HLD-of-SNS-client.md b/doc/HLD-of-SNS-client.md index 2d9d32b4db9..ef211ee5ea9 100644 --- a/doc/HLD-of-SNS-client.md +++ b/doc/HLD-of-SNS-client.md @@ -42,7 +42,7 @@ External SNS client interfaces are standard Linux file_operations and address_sp ## Logical Specification ### fop builder, NRS, and request handler -A fop, representing IO operation is created at the VFS or VM entry point1. The fop is then passed to the dummy NRS(23), which immediately passes it down to the request handler. The request handler uses file meta-data to identify the layout and calls the layout IO engine to proceed with the IO operation. +A fop, representing IO operation is created at the VFS or VM entry point1. The fop is then passed to the fake NRS(23), which immediately passes it down to the request handler. The request handler uses file meta-data to identify the layout and calls the layout IO engine to proceed with the IO operation. ### Layout Schema The layout formula generates a parity de-clustered file layout for a particular file, using file id (fid) as an identifier[2]. See Parity De-clustering Algorithm HLD [3] for details. At the moment, **m0t1fs** supports a single file with fid supplied as a mount option. diff --git a/doc/ISC-Service-User-Guide b/doc/ISC-Service-User-Guide index e78000ad3b5..00df051b4b6 100644 --- a/doc/ISC-Service-User-Guide +++ b/doc/ISC-Service-User-Guide @@ -96,11 +96,11 @@ Consider a simple API that on reception of string “Hello” responds with “W char *in_string, struct m0_rpc_conn *conn) { int rc; - /* A string is mapped to a mero buffer. */ + /* A string is mapped to a motr buffer. */ m0_buf_init(in_args, in_string, strlen(in_string)); /* Initialise RPC adaptive transmission data structure. 
*/ m0_rpc_at_init(&isc_fop->fi_args); - /* Add mero buffer to m0_rpc_at */ + /* Add motr buffer to m0_rpc_at */ rc = m0_rpc_at_add(&isc_fop->fi_args, in_args, conn); @@ -198,7 +198,7 @@ We now discuss the callee side code. Let’s assume that the function is registe if (m0_buf_streq(in, “Hello”)) { /* * The string allocated here should not be freed by - * computation and Mero takes care of freeing it. + * computation and Motr takes care of freeing it. */ out_str = m0_strdup(“World”); @@ -224,7 +224,7 @@ Suppose we have a collection of arrays of integers, each stored as a Motr object ``` /* Arguments for getting min/max. */ struct arr_fids { - /* Number of arrays stored with Mero. */ + /* Number of arrays stored with Motr. */ uint32_t af_arr_nr; /* An array holding unique identifiers of arrays. */ struct m0_fid *af_gfids @@ -280,7 +280,7 @@ struct histo_args { /** Minimum value. */ uint64_t ha_min_val; - /** Global fid of object stored with Mero. */ + /** Global fid of object stored with Motr. */ struct m0_fid ha_gob_fid; } M0_XCA_RECORD; @@ -295,7 +295,7 @@ Here we discuss the API for generating a histogram of values, local to a node. T * Structure of a computation is advisable to be similar to * Motr foms. It returns M0_FSO_WAIT when it has to wait for * an external event (n/w or disk I/O)else it returns - * M0_FSO_AGAIN. These two symbols are defined in Mero. + * M0_FSO_AGAIN. These two symbols are defined in Motr. */ int histo_generate(struct m0_buf *in, struct m0_buf *out, struct m0_isc_comp_private *comp_data, diff --git a/doc/ISC-Service-User-Guide.md b/doc/ISC-Service-User-Guide.md index 8b8e9366d68..7f97eac33b3 100644 --- a/doc/ISC-Service-User-Guide.md +++ b/doc/ISC-Service-User-Guide.md @@ -96,11 +96,11 @@ Consider a simple API that on reception of string “Hello” responds with “W char *in_string, struct m0_rpc_conn *conn) { int rc; - /* A string is mapped to a mero buffer. */ + /* A string is mapped to a motr buffer. 
*/ m0_buf_init(in_args, in_string, strlen(in_string)); /* Initialise RPC adaptive transmission data structure. */ m0_rpc_at_init(&isc_fop->fi_args); - /* Add mero buffer to m0_rpc_at */ + /* Add motr buffer to m0_rpc_at */ rc = m0_rpc_at_add(&isc_fop->fi_args, in_args, conn); @@ -199,7 +199,7 @@ We now discuss the callee side code. Let’s assume that the function is registe if (m0_buf_streq(in, “Hello”)) { /* * The string allocated here should not be freed by - * computation and Mero takes care of freeing it. + * computation and Motr takes care of freeing it. */ out_str = m0_strdup(“World”); @@ -225,7 +225,7 @@ Suppose we have a collection of arrays of integers, each stored as a Motr object ```C /* Arguments for getting min/max. */ struct arr_fids { - /* Number of arrays stored with Mero. */ + /* Number of arrays stored with Motr. */ uint32_t af_arr_nr; /* An array holding unique identifiers of arrays. */ struct m0_fid *af_gfids @@ -281,7 +281,7 @@ struct histo_args { /** Minimum value. */ uint64_t ha_min_val; - /** Global fid of object stored with Mero. */ + /** Global fid of object stored with Motr. */ struct m0_fid ha_gob_fid; } M0_XCA_RECORD; @@ -295,7 +295,7 @@ Here we discuss the API for generating a histogram of values, local to a node. T * Structure of a computation is advisable to be similar to * Motr foms. It returns M0_FSO_WAIT when it has to wait for * an external event (n/w or disk I/O)else it returns - * M0_FSO_AGAIN. These two symbols are defined in Mero. + * M0_FSO_AGAIN. These two symbols are defined in Motr. 
*/ ```C int histo_generate(struct m0_buf *in, struct m0_buf *out, diff --git a/doc/Images/DI01.png b/doc/Images/DI01.png new file mode 100644 index 00000000000..1ae20a44047 Binary files /dev/null and b/doc/Images/DI01.png differ diff --git a/doc/Images/DI02.png b/doc/Images/DI02.png new file mode 100644 index 00000000000..e3310c80807 Binary files /dev/null and b/doc/Images/DI02.png differ diff --git a/doc/Images/DI03.png b/doc/Images/DI03.png new file mode 100644 index 00000000000..b29c84aff3f Binary files /dev/null and b/doc/Images/DI03.png differ diff --git a/doc/Images/DI04.png b/doc/Images/DI04.png new file mode 100644 index 00000000000..1a7631b42da Binary files /dev/null and b/doc/Images/DI04.png differ diff --git a/doc/Images/DI05.png b/doc/Images/DI05.png new file mode 100644 index 00000000000..440d1c63dcd Binary files /dev/null and b/doc/Images/DI05.png differ diff --git a/doc/Images/DI06.png b/doc/Images/DI06.png new file mode 100644 index 00000000000..01c3eebd219 Binary files /dev/null and b/doc/Images/DI06.png differ diff --git a/doc/Images/DI07.png b/doc/Images/DI07.png new file mode 100644 index 00000000000..969c474ffb9 Binary files /dev/null and b/doc/Images/DI07.png differ diff --git a/doc/Images/DI08.png b/doc/Images/DI08.png new file mode 100644 index 00000000000..3a08e4af500 Binary files /dev/null and b/doc/Images/DI08.png differ diff --git a/doc/Images/DI09.png b/doc/Images/DI09.png new file mode 100644 index 00000000000..9b7a614938d Binary files /dev/null and b/doc/Images/DI09.png differ diff --git a/doc/Images/DI10.png b/doc/Images/DI10.png new file mode 100644 index 00000000000..d0078c9e6c5 Binary files /dev/null and b/doc/Images/DI10.png differ diff --git a/doc/Images/DI11.png b/doc/Images/DI11.png new file mode 100644 index 00000000000..5f672dd4215 Binary files /dev/null and b/doc/Images/DI11.png differ diff --git a/doc/Images/DTM0R Components and message flow.svg b/doc/Images/DTM0R Components and message flow.svg new file mode 100644 index 
00000000000..f66562d3229 --- /dev/null +++ b/doc/Images/DTM0R Components and message flow.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/doc/Motr-Lnet-Transport.md b/doc/Motr-Lnet-Transport.md index 94c849e7fa3..a1c0bb58c74 100644 --- a/doc/Motr-Lnet-Transport.md +++ b/doc/Motr-Lnet-Transport.md @@ -381,7 +381,7 @@ A Motr server uses the following pattern to use the LNet transport to initiate a A Motr tool uses the following pattern to use the LNet transport to initiate passive bulk tranfers to Motr server components: -1. The tool should use an end point address that is not assigned to any mero server or file system client. It should use a dynamic address to achieve this. +1. The tool should use an end point address that is not assigned to any motr server or file system client. It should use a dynamic address to achieve this. 2. To perform a bulk operation, the tool provisions a network buffer. The tool then registers this buffer and enqueues the buffer for transmission. 3. When a buffer operation completes, the buffer can be de-registered and the memory can be de-provisioned. @@ -437,7 +437,7 @@ LNet is capable of running without Lustre, but currently is distributed only thr ### References * [1] T1 Task Definitions -* [2] Mero Summary Requirements Table +* [2] Motr Summary Requirements Table * [3] m0 Glossary * [4] m0LNet Preliminary Design Questions * [5] RPC Bulk Transfer Task Plan diff --git a/doc/RPC_Layer_Core.rst b/doc/RPC_Layer_Core.rst index a2dde9ebdcb..cc2344de679 100644 --- a/doc/RPC_Layer_Core.rst +++ b/doc/RPC_Layer_Core.rst @@ -60,7 +60,7 @@ Requirements - [r.rpccore.efficient.bulk] 0-copy, if provided by the underlying network transport, is utilized; -- [r.rpccore.eos] support ordered exactly once semantics (EOS) of delivery; +- [r.rpccore.exactly-once-semantics] support ordered exactly once semantics of delivery; - [r.rpccore.formation.settings] support different setting like max_rpc_in_flight, max_page_per_rpc, etc. 
@@ -239,7 +239,7 @@ Cached FOPs might have dependencies each on other. This could affect the order o - m0_rpcmachine is a RPC processing machine, several instances of it might be existing simultaneously. -- m0_update_stream is an ADT associated with sessions and slots used for FOP sending with FIFO and EOS constrains. +- m0_update_stream is an ADT associated with sessions and slots used for FOP sending with FIFO and exactly-once-semantics constraints. ********************* diff --git a/doc/Seagate-FDMI-HLD.md b/doc/Seagate-FDMI-HLD.md index 1ae638c0620..cb4e100d6c5 100644 --- a/doc/Seagate-FDMI-HLD.md +++ b/doc/Seagate-FDMI-HLD.md @@ -12,7 +12,7 @@ # Introduction # ## 1.1 Document's Purpose ## -The document is intended to specify the design of Mero FDMI interface. FDMI is a part of Mero product. FDMI provides interface for Mero plugins and allows horizontally extending the features and capabilities of the system. +The document is intended to specify the design of Motr FDMI interface. FDMI is a part of Motr product. FDMI provides interface for Motr plugins and allows horizontally extending the features and capabilities of the system. ## 1.2 Intended Audience ## * Product Architect @@ -23,12 +23,12 @@ The document is intended to specify the design of Mero FDMI interface. FDMI is a FDMI: File data manipulation interface ## 1.4 References ## -1. “Mero Object Store Architecture: Technical” MeroTechnicalWhitepaper.pdf -2. “mero a scalable storage platform” Mero technical (toi).pdf +1. “Motr Object Store Architecture: Technical” MotrTechnicalWhitepaper.pdf +2. “motr a scalable storage platform” Motr technical (toi).pdf 3. fdmihighleveldecomposition.pdf # Overview # -Mero is a storage core capable of deployment for a wide range of large scale storage regimes, from cloud and enterprise systems to exascale HPC installations. FDMI is a part of Mero core, providing interface for plugins implementation.
FDMI is build around the core and allows for horizontally extending the features and capabilities of the system in a scalable and reliable manner. +Motr is a storage core capable of deployment for a wide range of large scale storage regimes, from cloud and enterprise systems to exascale HPC installations. FDMI is a part of Motr core, providing interface for plugins implementation. FDMI is build around the core and allows for horizontally extending the features and capabilities of the system in a scalable and reliable manner. ## 1.5 Product Purpose ## TBD @@ -52,9 +52,9 @@ In this section only architectural information like the following is displayed b -## 1.7 FDMI position in overall Mero Core design ## +## 1.7 FDMI position in overall Motr Core design ## -FDMI is an interface allowing Mero Core scale horizontally. The scaling includes two aspects: +FDMI is an interface allowing Motr Core scale horizontally. The scaling includes two aspects: * Core expansion in aspect of adding core data processing abilities, including data volumes as well as transformation into alternative representation. The expansion is provided by introducing FDMI plug-ins. @@ -62,15 +62,15 @@ FDMI is an interface allowing Mero Core scale horizontally. The scaling includes * Core expansion in aspect of adding new types of data the core is able to feed plug-ins. This sort of expansion is provided by introducing FDMI sources. - * Initial design implies that FOL record is the only source data type Mero Core provides so far. + * Initial design implies that FOL record is the only source data type Motr Core provides so far. -FDMI plug-in is an application linked with Mero Core to make use of corresponding FDMI interfaces and run separate from Mero instance/services. 
The purpose of introducing plug-in is getting notifications from Mero Core about particular changes in stored data and further post-processing of the data intended for producing some additional classes of data the Core currently is not able to provide. +FDMI plug-in is an application linked with Motr Core to make use of corresponding FDMI interfaces and run separate from Motr instance/services. The purpose of introducing plug-in is getting notifications from Motr Core about particular changes in stored data and further post-processing of the data intended for producing some additional classes of data the Core currently is not able to provide. -Instead, FDMI source appears to be a part of Mero instance being linked with appropriate FDMI interfaces and allowing connection to additional data providers. +Instead, FDMI source appears to be a part of Motr instance being linked with appropriate FDMI interfaces and allowing connection to additional data providers. -Considering the amount of data Mero Core operates with it obvious that plug-in typically requires a sufficiently reduced bulk of data to be routed to it for post-processing. The reduction is provided by introduction of mechanism of subscription to particular data types and conditions met at runtime. The subscription mechanism is based on set of filters the plug-in registers in Mero Filter Database during its initialization. +Considering the amount of data Motr Core operates with it obvious that plug-in typically requires a sufficiently reduced bulk of data to be routed to it for post-processing. The reduction is provided by introduction of mechanism of subscription to particular data types and conditions met at runtime. The subscription mechanism is based on set of filters the plug-in registers in Motr Filter Database during its initialization. Source in its turn refreshes its own subset of filters against the database. 
The subset is selected from overall filter set based on the knowledge about data types the source is able to feed FDMI with as well as operation with the data the source supports. @@ -80,7 +80,7 @@ FDMI consists of APIs implementing particular roles in accordance with FDMI use * Plug-in dock, responsible for: * Plug-in registration in FDMI instance - * Filter registration in Mero Filter Database + * Filter registration in Motr Filter Database * Listening to notifications coming over RPC * Payload processing * Self-diagnostic (TBD) @@ -89,7 +89,7 @@ FDMI consists of APIs implementing particular roles in accordance with FDMI use * Source registration * Retrieving/refreshing filter set for the source * Input data filtration - * Deciding on and posting notifications to filter subscribers over Mero RPC + * Deciding on and posting notifications to filter subscribers over Motr RPC * Deferred input data release * Self-diagnostic (TBD) @@ -176,6 +176,6 @@ Input data may require to remain locked in the Source until the moment when plug ![image](./images/Image8_FDMIserviceFoundDead.PNG) -When interaction between Mero services results in a timeout exceeding pre-configured value, the not responding service needs to be announced dead across the whole system. First of all **confd** service is notified about the service not responding. After being marked dead in **confd** database, the service has to be reported to **filterd** as well. The main purpose is to deregister FDMI sources hosted by the service, if any, to stop propagating **filterd** database changes to those. +When interaction between Motr services results in a timeout exceeding pre-configured value, the not responding service needs to be announced dead across the whole system. First of all **confd** service is notified about the service not responding. After being marked dead in **confd** database, the service has to be reported to **filterd** as well. 
The main purpose is to deregister FDMI sources hosted by the service, if any, to stop propagating **filterd** database changes to those. As well, the moment of the last instance of the source type coming out, the corresponding plug-ins might be notified. diff --git a/doc/faq.rst b/doc/faq.rst index 96534f4f582..dc2db05b3db 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -43,6 +43,6 @@ Mero -> Motr rename make[1]: *** [all-recursive] Error 1 make: *** [all] Error 2 - A: Remove ``/etc/ld.so.conf.d/mero.conf``, then rebuild Motr after ``git + A: Remove ``/etc/ld.so.conf.d/motr.conf``, then rebuild Motr after ``git clean -dfx`` (WARNING: removes all files that are not staged and are not in the repo). diff --git a/doc/fdmi_demo/demo-fdmi/m0-instance/Makefile b/doc/fdmi_demo/demo-fdmi/m0-instance/Makefile index 194c059f9aa..a2c205fa47a 100755 --- a/doc/fdmi_demo/demo-fdmi/m0-instance/Makefile +++ b/doc/fdmi_demo/demo-fdmi/m0-instance/Makefile @@ -1,6 +1,6 @@ CC=gcc -MERO_PATH=/root/mero-true-bulk-rebased +MOTR_PATH=/root/motr-true-bulk-rebased LUSTRE_PATH=/usr/src/lustre-2.7.18.4-headers CFLAGS=-g -std=gnu99 -Wall -Werror -Wno-attributes -Wno-unused-variable \ @@ -8,10 +8,10 @@ CFLAGS=-g -std=gnu99 -Wall -Werror -Wno-attributes -Wno-unused-variable \ -DM0_EXTERN=extern -fno-strict-aliasing -fno-omit-frame-pointer -fno-common \ -fPIC -INCLUDE_FLAGS=-include config.h -I$(MERO_PATH) -I$(LUSTRE_PATH)/lnet/include \ +INCLUDE_FLAGS=-include config.h -I$(MOTR_PATH) -I$(LUSTRE_PATH)/lnet/include \ -I$(LUSTRE_PATH)/lustre/include -LDFLAGS=-L$(MERO_PATH)/extra-libs/gf-complete/src/.libs -L$(MERO_PATH)/mero/.libs -lm -lpthread -lrt -lgf_complete -lyaml -luuid -lmero +LDFLAGS=-L$(MOTR_PATH)/extra-libs/gf-complete/src/.libs -L$(MOTR_PATH)/motr/.libs -lm -lpthread -lrt -lgf_complete -lyaml -luuid -lmotr OBJS=src/main.o diff --git a/doc/fdmi_demo/demo-fdmi/m0-instance/src/main.c b/doc/fdmi_demo/demo-fdmi/m0-instance/src/main.c index f69361707ea..662506345ea 100755 --- 
a/doc/fdmi_demo/demo-fdmi/m0-instance/src/main.c +++ b/doc/fdmi_demo/demo-fdmi/m0-instance/src/main.c @@ -24,7 +24,7 @@ #include "pool/pool.h" /* m0_pool_version */ #include "conf/confc.h" /* m0_confc_close */ #include "net/lnet/lnet.h" /* m0_net_lnet_xprt */ -#include "mero/ha.h" +#include "motr/ha.h" #include "rpc/rpc_machine.h" /* m0_rpc_machine */ #include "rpc/rpc.h" /* m0_rpc_bufs_nr */ #include "reqh/reqh.h" /* m0_reqh */ @@ -34,9 +34,9 @@ #include "fdmi/service.h" #include "fdmi/plugin_dock.h" #include "fdmi/plugin_dock_internal.h" -#include+ The @c dld_sample_ds1 structure tracks the density of the + electro-magnetic field with the following: +@code +struct dld_sample_ds1 { + ... + int dsd_flux_density; + ... +}; +@endcode + The value of this field is inversely proportional to the square of the + number of lines of comments in the DLD. + |