diff --git a/apps/leo_storage/src/leo_storage_handler_object.erl b/apps/leo_storage/src/leo_storage_handler_object.erl
index 0dddf802..2832edc8 100644
--- a/apps/leo_storage/src/leo_storage_handler_object.erl
+++ b/apps/leo_storage/src/leo_storage_handler_object.erl
@@ -1293,8 +1293,6 @@ read_and_repair_3({ok, Metadata, #?OBJECT{data = Bin,
     case (NumOfReplicas > 0 andalso Preferred_R > 0) of
         true ->
-            ?debugVal({NumOfReplicas, Preferred_R}),
-            ?debugVal(Redundancies),
             {Preferred_R, lists:sublist(Redundancies, NumOfReplicas - 1)};
         false ->
             {Quorum, Redundancies}
diff --git a/apps/leo_storage/src/leo_storage_mq.erl b/apps/leo_storage/src/leo_storage_mq.erl
index 7d70e75f..d689cf8b 100644
--- a/apps/leo_storage/src/leo_storage_mq.erl
+++ b/apps/leo_storage/src/leo_storage_mq.erl
@@ -440,16 +440,16 @@ handle_call(_,_,_) ->
 %%--------------------------------------------------------------------
 %% @doc synchronize by vnode-id.
 %% @private
--spec(recover_node(atom()) ->
-             ok).
+-spec(recover_node(Node) ->
+             ok when Node::node()).
 recover_node(Node) ->
     Callback = recover_node_callback(Node),
     _ = leo_object_storage_api:fetch_by_addr_id(0, Callback),
     ok.

 %% @private
--spec(recover_node_callback(atom()) ->
-             any()).
+-spec(recover_node_callback(Node) ->
+             any() when Node::node()).
 recover_node_callback(Node) ->
     fun(K, V, Acc) ->
             Metadata_1 = binary_to_term(V),
@@ -512,41 +512,17 @@ recover_node_callback_2([SrcNode|Rest], AddrId, Key, FixedNode) ->
 send_object_to_remote_node(Node, AddrId, Key) ->
     Ref = make_ref(),
     case leo_storage_handler_object:get({Ref, Key}) of
-        {ok, Ref, #?METADATA{num_of_replicas = Preferred_N} = Metadata, Bin} ->
-            %% Check redundant nodes by key's addr_id
-            %% whether this node needs to store the object OR not
-            case leo_redundant_manager_api:get_redundancies_by_addr_id(AddrId) of
-                {ok, #redundancies{nodes = Redundancies,
-                                   n = N}} ->
-                    Redundancies_1 =
-                        case (N > Preferred_N andalso Preferred_N > 0) of
-                            true ->
-                                lists:sublist(Redundancies, Preferred_N);
-                            false ->
-                                Redundancies
-                        end,
-
-                    case lists:filter(
-                           fun(#redundant_node{node = RedundantNode}) ->
-                                   Node == RedundantNode
-                           end, Redundancies_1) of
-                        [] ->
-                            ok;
-                        _ ->
-                            case rpc:call(Node, leo_sync_local_cluster, store,
-                                          [Metadata, Bin], ?DEF_REQ_TIMEOUT) of
-                                ok ->
-                                    ok;
-                                {error, inconsistent_obj} ->
-                                    ?MODULE:publish(?QUEUE_ID_PER_OBJECT,
-                                                    AddrId, Key, ?ERR_TYPE_RECOVER_DATA);
-                                _ ->
-                                    ?MODULE:publish(?QUEUE_ID_PER_OBJECT, AddrId, Key,
-                                                    Node, true, ?ERR_TYPE_RECOVER_DATA)
-                            end
-                    end;
-                {error, Reason} ->
-                    {error, Reason}
+        {ok, Ref, Metadata, Bin} ->
+            case rpc:call(Node, leo_sync_local_cluster, store,
+                          [Metadata, Bin], ?DEF_REQ_TIMEOUT) of
+                ok ->
+                    ok;
+                {error, inconsistent_obj} ->
+                    ?MODULE:publish(?QUEUE_ID_PER_OBJECT,
+                                    AddrId, Key, ?ERR_TYPE_RECOVER_DATA);
+                _ ->
+                    ?MODULE:publish(?QUEUE_ID_PER_OBJECT, AddrId, Key,
+                                    Node, true, ?ERR_TYPE_RECOVER_DATA)
             end;
         {error, Ref, Cause} ->
             {error, Cause};
@@ -557,8 +533,12 @@ send_object_to_remote_node(Node, AddrId, Key) ->

 %% @doc synchronize by vnode-id.
 %% @private
--spec(sync_vnodes(atom(), integer(), list()) ->
-             ok).
+-spec(sync_vnodes(Node, RingHash, ListOfFromToAddrId) ->
+             ok when Node::node(),
+                     RingHash::integer(),
+                     FromAddrId::integer(),
+                     ToAddrId::integer(),
+                     ListOfFromToAddrId::[{FromAddrId, ToAddrId}]).
 sync_vnodes(_, _, []) ->
     ok;
 sync_vnodes(Node, RingHash, [{FromAddrId, ToAddrId}|T]) ->
@@ -568,8 +548,10 @@ sync_vnodes(Node, RingHash, [{FromAddrId, ToAddrId}|T]) ->
     sync_vnodes(Node, RingHash, T).

 %% @private
--spec(sync_vnodes_callback(atom(), pos_integer(), pos_integer()) ->
-             any()).
+-spec(sync_vnodes_callback(Node, FromAddrId, ToAddrId) ->
+             any() when Node::node(),
+                        FromAddrId::integer(),
+                        ToAddrId::integer()).
 sync_vnodes_callback(Node, FromAddrId, ToAddrId)->
     fun(_K, V, Acc) ->
             %% Note: An object of copy is NOT equal current ring-hash.
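The `-spec` rewrites above (and the ones that follow) all apply the same pattern: positional argument types are replaced by named arguments constrained in a `when` clause. A minimal sketch of the two styles on a hypothetical `send_to/3` (the name is illustrative, not from this patch):

```erlang
%% Old style (as removed above):
%%   -spec(send_to(atom(), integer(), binary()) ->
%%                ok | {error, any()}).
%%
%% New style: named arguments bound in a `when` clause -- the same
%% contract for Dialyzer, but self-documenting and rendered with
%% argument names by edoc.
-spec(send_to(Node, AddrId, Key) ->
             ok | {error, Cause} when Node::node(),
                                      AddrId::integer(),
                                      Key::binary(),
                                      Cause::any()).
send_to(_Node,_AddrId,_Key) ->
    ok.
```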
@@ -605,8 +587,11 @@ sync_vnodes_callback(Node, FromAddrId, ToAddrId)->

 %% @doc Remove a node from redundancies
 %% @private
--spec(delete_node_from_redundancies(list(#redundant_node{}), atom(), list(#redundant_node{})) ->
-             {ok, list(#redundant_node{})}).
+
+-spec(delete_node_from_redundancies(Redundancies, Node, AccRedundancies) ->
+             {ok, AccRedundancies} when Redundancies::[#redundant_node{}],
+                                        Node::node(),
+                                        AccRedundancies::[#redundant_node{}]).
 delete_node_from_redundancies([],_,Acc) ->
     {ok, lists:reverse(Acc)};
 delete_node_from_redundancies([#redundant_node{node = Node}|Rest], Node, Acc) ->
@@ -617,8 +602,10 @@ delete_node_from_redundancies([RedundantNode|Rest], Node, Acc) ->

 %% @doc Find a node from redundancies
 %% @private
--spec(find_node_from_redundancies(list(#redundant_node{}), atom()) ->
-             boolean()).
+-spec(find_node_from_redundancies(Redundancies, Node) ->
+             Ret when Redundancies::[#redundant_node{}],
+                      Node::node(),
+                      Ret::boolean()).
 find_node_from_redundancies([],_) ->
     false;
 find_node_from_redundancies([#redundant_node{node = Node}|_], Node) ->
@@ -629,8 +616,11 @@ find_node_from_redundancies([_|Rest], Node) ->

 %% @doc Notify a message to manager node(s)
 %% @private
--spec(notify_message_to_manager(list(), integer(), atom()) ->
-             ok | {error, any()}).
+-spec(notify_message_to_manager(ManagerNodes, VNodeId, Node) ->
+             ok | {error, Cause} when ManagerNodes::[node()],
+                                      VNodeId::integer(),
+                                      Node::node(),
+                                      Cause::any()).
 notify_message_to_manager([],_VNodeId,_Node) ->
     {error, 'fail_notification'};
 notify_message_to_manager([Manager|T], VNodeId, Node) ->
@@ -657,8 +647,9 @@ notify_message_to_manager([Manager|T], VNodeId, Node) ->

 %% @doc correct_redundancies/1 - first.
 %% @private
--spec(correct_redundancies(binary()) ->
-             ok | {error, any()}).
+-spec(correct_redundancies(Key) ->
+             ok | {error, Cause} when Key::binary(),
+                                      Cause::any()).
 correct_redundancies(Key) ->
     case leo_redundant_manager_api:get_redundancies_by_key(Key) of
         {ok, #redundancies{nodes = Redundancies,
@@ -842,14 +833,22 @@ rebalance_2({ok, Redundancies}, #rebalance_message{node = Node,
                                  AddrId, Key, Redundancies),
     case find_node_from_redundancies(Redundancies_1, erlang:node()) of
         true ->
-            send_object_to_remote_node(Node, AddrId, Key);
+            case lists:filter(
+                   fun(#redundant_node{node = RedundantNode}) ->
+                           Node == RedundantNode
+                   end, Redundancies_1) of
+                [] ->
+                    ?MODULE:publish(?QUEUE_ID_PER_OBJECT,
+                                    AddrId, Key, ?ERR_TYPE_RECOVER_DATA);
+                _ ->
+                    send_object_to_remote_node(Node, AddrId, Key)
+            end;
         false ->
             ?warn("rebalance_2/2",
                   [{node, Node}, {addr_id, AddrId},
                    {key, Key}, {cause, 'node_not_found'}]),
-            ok = publish(?QUEUE_ID_PER_OBJECT,
-                         AddrId, Key, ?ERR_TYPE_REPLICATE_DATA),
-            ok
+            publish(?QUEUE_ID_PER_OBJECT,
+                    AddrId, Key, ?ERR_TYPE_REPLICATE_DATA)
     end.
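Taken together, the two hunks above move the "is this node actually a replica target?" decision out of `send_object_to_remote_node/3` and into `rebalance_2/2`, so the sender no longer re-fetches redundancies per object. A condensed sketch of the resulting control flow (the helper names are hypothetical, not from this patch):

```erlang
%% Sketch: rebalance_2/2 decides, send_object_to_remote_node/3 only ships.
is_rebalance_target(Node, Redundancies) ->
    lists:any(fun(#redundant_node{node = N}) ->
                      N == Node
              end, Redundancies).

handle_rebalance(Node, AddrId, Key, Redundancies) ->
    case is_rebalance_target(Node, Redundancies) of
        true ->
            %% ship the object; failures are re-enqueued by the sender
            send_object_to_remote_node(Node, AddrId, Key);
        false ->
            %% destination is not a replica target -> requeue for recovery
            ?MODULE:publish(?QUEUE_ID_PER_OBJECT,
                            AddrId, Key, ?ERR_TYPE_RECOVER_DATA)
    end.
```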
@@ -940,8 +939,11 @@ fix_consistency_between_clusters(#inconsistent_data_with_dc{
 %%--------------------------------------------------------------------
 %% @doc Lookup rebalance counter
 %% @private
--spec(ets_lookup(atom(), integer()) ->
-             list() | {error, any()}).
+-spec(ets_lookup(Table, Key) ->
+             {ok, Value} | {error, Cause} when Table::atom(),
+                                               Key::binary(),
+                                               Value::integer(),
+                                               Cause::any()).
 ets_lookup(Table, Key) ->
     case catch ets:lookup(Table, Key) of
         [] ->
diff --git a/apps/leo_storage/src/leo_sync_remote_cluster.erl b/apps/leo_storage/src/leo_sync_remote_cluster.erl
index d1bb84e0..9b2b0b6f 100644
--- a/apps/leo_storage/src/leo_sync_remote_cluster.erl
+++ b/apps/leo_storage/src/leo_sync_remote_cluster.erl
@@ -452,15 +452,6 @@ replicate(ClusterId, Object) ->
 %% @private
 replicate_1({ok, MDCR_N}, Object) ->
     Object_1 = Object#?OBJECT{num_of_replicas = MDCR_N},
-    %% @DEBUG >>
-    lists:foreach(fun(X) ->
-                          ?debugVal(X)
-                  end, lists:zip(
-                         record_info(
-                           fields, ?OBJECT),
-                         tl(tuple_to_list(Object_1)))),
-    %% <<
-
     Ret = case leo_storage_handler_object:replicate(Object_1) of
               {ok,_ETag} ->
                   {ok, leo_object_storage_transformer:object_to_metadata(Object_1)};
diff --git a/mdcr.sh b/mdcr.sh
index b28c1bce..ceb082c6 100755
--- a/mdcr.sh
+++ b/mdcr.sh
@@ -4,7 +4,7 @@
 # GLOBALS
 #-------------------------------------------------------------------------------
 NCLUSTERS=2
-CLUSTER_NSTORAGES=3
+CLUSTER_NSTORAGES=4

 #-------------------------------------------------------------------------------
 # ROUTINES
diff --git a/priv/test/mdcr-test/c1/leo_manager.conf.0 b/priv/test/mdcr-test/c1/leo_manager.conf.0
index 0dbe6c4f..b767e952 100644
--- a/priv/test/mdcr-test/c1/leo_manager.conf.0
+++ b/priv/test/mdcr-test/c1/leo_manager.conf.0
@@ -61,16 +61,16 @@ system.cluster_id = leofs_1
 ## * See: https://leo-project.net/leofs/docs/configuration/configuration_1.html
 ## --------------------------------------------------------------------
 ## A number of replicas
-consistency.num_of_replicas = 2
+consistency.num_of_replicas = 3

 ## A number of replicas needed for a successful WRITE operation
-consistency.write = 1
+consistency.write = 2

 ## A number of replicas needed for a successful READ operation
 consistency.read = 1

 ## A number of replicas needed for a successful DELETE operation
-consistency.delete = 1
+consistency.delete = 2

 ## A number of rack-aware replicas
 consistency.rack_aware_replicas = 0

@@ -82,8 +82,24 @@ consistency.rack_aware_replicas = 0
 ## A number of replication targets
 mdc_replication.max_targets = 2

-## A number of replicas a DC
-mdc_replication.num_of_replicas_a_dc = 1
+## A number of replicas per datacenter
+## [note] A local LeoFS sends a stacked object which contains the items of a replication method:
+##        - [L1_N] A number of replicas
+##        - [L1_W] A number of replicas needed for a successful WRITE operation
+##        - [L1_R] A number of replicas needed for a successful READ operation
+##        - [L1_D] A number of replicas needed for a successful DELETE operation
+##        A remote LeoFS cluster receives the object,
+##        and then replicates it by the contained replication method.
+mdc_replication.num_of_replicas_a_dc = 2
+
+## MDC replication / A number of replicas needed for a successful WRITE operation
+mdc_replication.consistency.write = 1
+
+## MDC replication / A number of replicas needed for a successful READ operation
+mdc_replication.consistency.read = 1
+
+## MDC replication / A number of replicas needed for a successful DELETE operation
+mdc_replication.consistency.delete = 1


 ## --------------------------------------------------------------------
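The consistency change is worth spelling out: with `num_of_replicas` raised from 2 to 3 and the write/delete quorums from 1 to 2, writes and deletes now require a majority of replicas, while `consistency.read = 1` keeps reads cheap. Note that R + W = 3 = N, so a read is still not guaranteed to intersect the latest write set; that is an availability trade-off rather than strict consistency. A small sketch that checks these properties (module and function names are illustrative):

```erlang
-module(quorum_props).
-export([check/4]).

%% check(N, R, W, D) -> proplist of quorum properties for the given
%% replica count and per-operation quorums.
check(N, R, W, D) ->
    [{write_majority,     W > N div 2},   %% writes win a split vote
     {delete_majority,    D > N div 2},
     {read_write_overlap, R + W > N}].    %% strict read-your-writes

%% quorum_props:check(3, 1, 2, 2).
%% => [{write_majority,true},
%%     {delete_majority,true},
%%     {read_write_overlap,false}]
```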
diff --git a/priv/test/mdcr-test/c1/leo_storage_3.conf b/priv/test/mdcr-test/c1/leo_storage_3.conf
new file mode 100644
index 00000000..1cfdcec1
--- /dev/null
+++ b/priv/test/mdcr-test/c1/leo_storage_3.conf
@@ -0,0 +1,366 @@
+#======================================================================
+# LeoFS - Storage Configuration
+#======================================================================
+## --------------------------------------------------------------------
+## SASL
+## --------------------------------------------------------------------
+## See: http://www.erlang.org/doc/man/sasl_app.html
+##
+## The following configuration parameters are defined for
+## the SASL application. See app(4) for more information
+## about configuration parameters
+
+## SASL error log path
+## sasl.sasl_error_log = ./log/sasl/sasl-error.log
+
+## Restricts the error logging performed by the specified sasl_error_logger
+## to error reports, progress reports, or both.
+## errlog_type = [error | progress | all]
+## sasl.errlog_type = error
+
+## Specifies in which directory the files are stored.
+## If this parameter is undefined or false, the error_logger_mf_h is not installed.
+# sasl.error_logger_mf_dir = ./log/sasl
+
+## Specifies how large each individual file can be.
+## If this parameter is undefined, the error_logger_mf_h is not installed.
+## sasl.error_logger_mf_maxbytes = 10485760
+
+## Specifies how many files are used.
+## If this parameter is undefined, the error_logger_mf_h is not installed.
+## sasl.error_logger_mf_maxfiles = 5
+
+## --------------------------------------------------------------------
+## Manager's Node(s)
+## --------------------------------------------------------------------
+## Name of Manager node(s)
+managers = [manager_0@127.0.0.1, manager_1@127.0.0.1]
+
+## --------------------------------------------------------------------
+## STORAGE
+## --------------------------------------------------------------------
+## Object container
+obj_containers.path = [./avs]
+obj_containers.num_of_containers = [8]
+
+## e.g. Case of plural paths
+## obj_containers.path = [/var/leofs/avs/1, /var/leofs/avs/2]
+## obj_containers.num_of_containers = [32, 64]
+
+## Metadata Storage: [bitcask, leveldb] - default:leveldb
+obj_containers.metadata_storage = leveldb
+
+## A number of virtual-nodes for the redundant-manager
+## num_of_vnodes = 168
+
+## Enable strict check between checksum of a metadata and checksum of an object
+## - default:false
+## object_storage.is_strict_check = false
+
+
+## --------------------------------------------------------------------
+## STORAGE - Watchdog
+## --------------------------------------------------------------------
+##
+## Watchdog.REX(RPC)
+##
+## rex - watch interval - default:5sec
+## watchdog.rex.interval = 5
+
+## Threshold memory capacity of binary for rex(rpc) - default:32MB
+watchdog.rex.threshold_mem_capacity = 33554432
+
+
+##
+## Watchdog.CPU
+##
+## Is cpu-watchdog enabled - default:false
+watchdog.cpu.is_enabled = true
+
+## cpu - raised error times
+watchdog.cpu.raised_error_times = 3
+
+## cpu - watch interval - default:5sec
+watchdog.cpu.interval = 5
+
+## Threshold CPU load avg for 1min/5min
+watchdog.cpu.threshold_cpu_load_avg = 5.0
+
+## Threshold CPU load util - default:100 = "100%"
+watchdog.cpu.threshold_cpu_util = 100
+
+
+##
+## Watchdog.IO
+##
+## Is io-watchdog enabled - default:false
+## watchdog.io.is_enabled = true
+
+## io - watch interval - default:1sec
+watchdog.io.interval = 1
+
+## Threshold input size/sec - default:134217728(B) - 128MB/sec
+watchdog.io.threshold_input_per_sec = 134217728
+
+## Threshold output size/sec - default:134217728(B) - 128MB/sec
+watchdog.io.threshold_output_per_sec = 134217728
+
+
+##
+## Watchdog.DISK
+##
+## Is disk-watchdog enabled - default:true
+watchdog.disk.is_enabled = true
+
+## disk - raised error times
+watchdog.disk.raised_error_times = 3
+
+## disk - watch interval - default:1sec
+watchdog.disk.interval = 5
+
+## Threshold disk use% - default:85%
+watchdog.disk.threshold_disk_use = 80
+
+## Threshold disk util% - default:100%
+watchdog.disk.threshold_disk_util = 100
+
+## Threshold disk read kb/sec - default:131072(KB)
+watchdog.disk.threshold_disk_rkb = 131072
+
+## Threshold disk write kb/sec - default:65536(KB)
+watchdog.disk.threshold_disk_wkb = 131072
+
+##
+## Watchdog.Cluster
+##
+## Is cluster-watchdog enabled - default:false
+watchdog.cluster.is_enabled = true
+
+## cluster - watch interval - default:1sec
+watchdog.cluster.interval = 10
+
+
+## --------------------------------------------------------------------
+## STORAGE - Autonomic Operation
+## --------------------------------------------------------------------
+## [compaction] enable compaction? - default:false
+autonomic_op.compaction.is_enabled = true
+
+## [compaction] number of parallel procs - default:1
+## autonomic_op.compaction.parallel_procs = 1
+
+## [compaction] warning ratio of active size - default:70%
+## autonomic_op.compaction.warn_active_size_ratio = 70
+
+## [compaction] threshold ratio of active size - default:60%
+## autonomic_op.compaction.threshold_active_size_ratio = 60
+
+
+## --------------------------------------------------------------------
+## STORAGE - Data Compaction
+## --------------------------------------------------------------------
+## Limit of a number of procs to execute data-compaction in parallel
+##compaction.limit_num_of_compaction_procs = 4
+
+## Minimum value of compaction-proc waiting time/batch-proc(msec)
+##compaction.waiting_time_min = 100
+
+## Regular value of compaction-proc waiting time/batch-proc(msec)
+##compaction.waiting_time_regular = 300
+
+## Maximum value of compaction-proc waiting time/batch-proc(msec)
+##compaction.waiting_time_max = 1000
+
+## Step of compaction-proc waiting time(msec)
+##compaction.waiting_time_step = 100
+
+
+## Minimum compaction batch processes
+##compaction.batch_procs_min = 1000
+
+## Regular compaction batch processes
+##compaction.batch_procs_regular = 10000
+
+## Maximum compaction batch processes
+##compaction.batch_procs_max = 100000
+
+## Step compaction batch processes
+##compaction.batch_procs_step = 1000
+
+
+## --------------------------------------------------------------------
+## STORAGE - MQ
+## --------------------------------------------------------------------
+## MQ backend storage: [bitcask, leveldb] - default:bitcask
+## mq.backend_db = bitcask
+
+## A number of mq-server's processes
+## mq.num_of_mq_procs = 8
+
+##
+## [Number of batch processes of message]
+##
+## Minimum number of batch processes of message
+mq.num_of_batch_process_min = 100
+
+## Maximum number of batch processes of message
+mq.num_of_batch_process_max = 10000
+
+## Regular number of batch processes of message
+mq.num_of_batch_process_regular = 1000
+
+## Step number of batch processes of message
+mq.num_of_batch_process_step = 100
+
+
+## --------------------------------------------------------------------
+## STORAGE - Replication/Recovery object(s)
+## --------------------------------------------------------------------
+## Rack-id for the rack-awareness replica placement
+## replication.rack_awareness.rack_id =
+
+## Size of stacked objects (bytes)
+## replication.recovery.size_of_stacked_objs = 67108864
+
+## Stacking timeout (msec)
+## replication.recovery.stacking_timeout = 5000
+
+
+## --------------------------------------------------------------------
+## STORAGE - Log
+## --------------------------------------------------------------------
+## Log level: [0:debug, 1:info, 2:warn, 3:error]
+log.log_level = 0
+
+## Output log file(s) - Erlang's log
+## log.erlang = ./log/erlang
+
+## Output log file(s) - app
+## log.app = ./log/app
+
+## Output log file(s) - members of storage-cluster
+## log.member_dir = ./log/ring
+
+## Output log file(s) - ring
+## log.ring_dir = ./log/ring
+
+## Output data-diagnosis log
+log.is_enable_diagnosis_log = true
+
+
+## --------------------------------------------------------------------
+## STORAGE - Other Directories
+## --------------------------------------------------------------------
+## Directory of queue for monitoring "RING"
+## queue_dir = ./work/queue
+
+## Directory of SNMP agent configuration
+snmp_agent = ./snmp/snmpa_storage_3/LEO-STORAGE
+
+
+## --------------------------------------------------------------------
+## Other Libs
+## --------------------------------------------------------------------
+## MQ backend storage: [bitcask, leveldb]
+## leo_mq.backend_db = bitcask
+
+## Enable strict check between checksum of a metadata and checksum of an object
+## leo_object_storage.is_strict_check = false
+
+## Metadata Storage: [bitcask, leveldb]
+leo_object_storage.metadata_storage = leveldb
+
+## Send after interval
+## leo_ordning_reda.send_after_interval = 100
+
+
+## --------------------------------------------------------------------
+## RPC
+## --------------------------------------------------------------------
+## RPC-Server's acceptors
+rpc.server.acceptors = 16
+
+## RPC-Server's listening port number
+rpc.server.listen_port = 13080
+
+## RPC-Server's listening timeout
+rpc.server.listen_timeout = 30000
+
+## RPC-Client's size of connection pool
+rpc.client.connection_pool_size = 16
+
+## RPC-Client's size of connection buffer
+rpc.client.connection_buffer_size = 16
+
+
+## --------------------------------------------------------------------
+## Profiling
+## --------------------------------------------------------------------
+## Enable profiler - leo_backend_db
+## leo_backend_db.profile = false
+
+## Enable profiler - leo_logger
+## leo_logger.profile = false
+
+## Enable profiler - leo_mq
+## leo_mq.profile = false
+
+## Enable profiler - leo_object_storage
+## leo_object_storage.profile = false
+
+## Enable profiler - leo_ordning_reda
+## leo_ordning_reda.profile = false
+
+## Enable profiler - leo_redundant_manager
+## leo_redundant_manager.profile = false
+
+## Enable profiler - leo_rpc
+## leo_rpc.profile = false
+
+## Enable profiler - leo_statistics
+## leo_statistics.profile = false
+
+
+## --------------------------------------------------------------------
+## MANAGER - Mnesia
+## * Store the info of the storage-cluster and the info of gateway(s)
+## * Store the RING and the command histories
+## --------------------------------------------------------------------
+## The write threshold for transaction log dumps
+## as the number of writes to the transaction log
+mnesia.dump_log_write_threshold = 50000
+
+## Controls how often disc_copies tables are dumped from memory
+mnesia.dc_dump_limit = 40
+
+
+#======================================================================
+# For vm.args
+#======================================================================
+## Name of the leofs-storage node
+nodename = storage_3@127.0.0.1
+
+## Cookie for distributed node communication. All nodes in the same cluster
+## should use the same cookie or they will not be able to communicate.
+distributed_cookie = 401321b4
+
+## Enable kernel poll
+erlang.kernel_poll = true
+
+## Number of async threads
+erlang.asyc_threads = 32
+
+## Increase number of concurrent ports/sockets
+erlang.max_ports = 64000
+
+## Set the location of crash dumps
+erlang.crash_dump = ./log/erl_crash.dump
+
+## Raise the ETS table limit
+erlang.max_ets_tables = 256000
+
+## Raise the default erlang process limit
+process_limit = 1048576
+
+## Path of SNMP-agent configuration
+snmp_conf = ./snmp/snmpa_storage_3/leo_storage_snmp
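The new c2 files below mirror the c1 files above; only the node-identity settings differ. For reference, the per-cluster deltas collected as an Erlang term (values read off the two new `leo_storage_3.conf` files):

```erlang
%% Settings that differ between the two new storage_3 configs;
%% the remaining ~360 lines of each file are identical.
[{c1, [{nodename,           'storage_3@127.0.0.1'},
       {managers,           ['manager_0@127.0.0.1','manager_1@127.0.0.1']},
       {rpc_listen_port,    13080},
       {distributed_cookie, '401321b4'}]},
 {c2, [{nodename,           'storage_13@127.0.0.1'},
       {managers,           ['manager_10@127.0.0.1','manager_11@127.0.0.1']},
       {rpc_listen_port,    13100},
       {distributed_cookie, leofs_c2}]}].
```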
diff --git a/priv/test/mdcr-test/c2/leo_manager.conf.0 b/priv/test/mdcr-test/c2/leo_manager.conf.0
index 8dd54522..620ba3b3 100644
--- a/priv/test/mdcr-test/c2/leo_manager.conf.0
+++ b/priv/test/mdcr-test/c2/leo_manager.conf.0
@@ -61,16 +61,16 @@ system.cluster_id = leofs_2
 ## * See: https://leo-project.net/leofs/docs/configuration/configuration_1.html
 ## --------------------------------------------------------------------
 ## A number of replicas
-consistency.num_of_replicas = 2
+consistency.num_of_replicas = 3

 ## A number of replicas needed for a successful WRITE operation
-consistency.write = 1
+consistency.write = 2

 ## A number of replicas needed for a successful READ operation
 consistency.read = 1

 ## A number of replicas needed for a successful DELETE operation
-consistency.delete = 1
+consistency.delete = 2

 ## A number of rack-aware replicas
 consistency.rack_aware_replicas = 0

@@ -82,8 +82,24 @@ consistency.rack_aware_replicas = 0
 ## A number of replication targets
 mdc_replication.max_targets = 2

-## A number of replicas a DC
-mdc_replication.num_of_replicas_a_dc = 1
+## A number of replicas per datacenter
+## [note] A local LeoFS sends a stacked object which contains the items of a replication method:
+##        - [L1_N] A number of replicas
+##        - [L1_W] A number of replicas needed for a successful WRITE operation
+##        - [L1_R] A number of replicas needed for a successful READ operation
+##        - [L1_D] A number of replicas needed for a successful DELETE operation
+##        A remote LeoFS cluster receives the object,
+##        and then replicates it by the contained replication method.
+mdc_replication.num_of_replicas_a_dc = 2
+
+## MDC replication / A number of replicas needed for a successful WRITE operation
+mdc_replication.consistency.write = 1
+
+## MDC replication / A number of replicas needed for a successful READ operation
+mdc_replication.consistency.read = 1
+
+## MDC replication / A number of replicas needed for a successful DELETE operation
+mdc_replication.consistency.delete = 1


 ## --------------------------------------------------------------------
diff --git a/priv/test/mdcr-test/c2/leo_storage_3.conf b/priv/test/mdcr-test/c2/leo_storage_3.conf
new file mode 100644
index 00000000..1ae0c653
--- /dev/null
+++ b/priv/test/mdcr-test/c2/leo_storage_3.conf
@@ -0,0 +1,366 @@
+#======================================================================
+# LeoFS - Storage Configuration
+#======================================================================
+## --------------------------------------------------------------------
+## SASL
+## --------------------------------------------------------------------
+## See: http://www.erlang.org/doc/man/sasl_app.html
+##
+## The following configuration parameters are defined for
+## the SASL application. See app(4) for more information
+## about configuration parameters
+
+## SASL error log path
+## sasl.sasl_error_log = ./log/sasl/sasl-error.log
+
+## Restricts the error logging performed by the specified sasl_error_logger
+## to error reports, progress reports, or both.
+## errlog_type = [error | progress | all]
+## sasl.errlog_type = error
+
+## Specifies in which directory the files are stored.
+## If this parameter is undefined or false, the error_logger_mf_h is not installed.
+# sasl.error_logger_mf_dir = ./log/sasl
+
+## Specifies how large each individual file can be.
+## If this parameter is undefined, the error_logger_mf_h is not installed.
+## sasl.error_logger_mf_maxbytes = 10485760
+
+## Specifies how many files are used.
+## If this parameter is undefined, the error_logger_mf_h is not installed.
+## sasl.error_logger_mf_maxfiles = 5
+
+## --------------------------------------------------------------------
+## Manager's Node(s)
+## --------------------------------------------------------------------
+## Name of Manager node(s)
+managers = [manager_10@127.0.0.1, manager_11@127.0.0.1]
+
+## --------------------------------------------------------------------
+## STORAGE
+## --------------------------------------------------------------------
+## Object container
+obj_containers.path = [./avs]
+obj_containers.num_of_containers = [8]
+
+## e.g. Case of plural paths
+## obj_containers.path = [/var/leofs/avs/1, /var/leofs/avs/2]
+## obj_containers.num_of_containers = [32, 64]
+
+## Metadata Storage: [bitcask, leveldb] - default:leveldb
+obj_containers.metadata_storage = leveldb
+
+## A number of virtual-nodes for the redundant-manager
+## num_of_vnodes = 168
+
+## Enable strict check between checksum of a metadata and checksum of an object
+## - default:false
+## object_storage.is_strict_check = false
+
+
+## --------------------------------------------------------------------
+## STORAGE - Watchdog
+## --------------------------------------------------------------------
+##
+## Watchdog.REX(RPC)
+##
+## rex - watch interval - default:5sec
+## watchdog.rex.interval = 5
+
+## Threshold memory capacity of binary for rex(rpc) - default:32MB
+watchdog.rex.threshold_mem_capacity = 33554432
+
+
+##
+## Watchdog.CPU
+##
+## Is cpu-watchdog enabled - default:false
+watchdog.cpu.is_enabled = true
+
+## cpu - raised error times
+watchdog.cpu.raised_error_times = 3
+
+## cpu - watch interval - default:5sec
+watchdog.cpu.interval = 5
+
+## Threshold CPU load avg for 1min/5min
+watchdog.cpu.threshold_cpu_load_avg = 5.0
+
+## Threshold CPU load util - default:100 = "100%"
+watchdog.cpu.threshold_cpu_util = 100
+
+
+##
+## Watchdog.IO
+##
+## Is io-watchdog enabled - default:false
+## watchdog.io.is_enabled = true
+
+## io - watch interval - default:1sec
+watchdog.io.interval = 1
+
+## Threshold input size/sec - default:134217728(B) - 128MB/sec
+watchdog.io.threshold_input_per_sec = 134217728
+
+## Threshold output size/sec - default:134217728(B) - 128MB/sec
+watchdog.io.threshold_output_per_sec = 134217728
+
+
+##
+## Watchdog.DISK
+##
+## Is disk-watchdog enabled - default:true
+watchdog.disk.is_enabled = true
+
+## disk - raised error times
+watchdog.disk.raised_error_times = 3
+
+## disk - watch interval - default:1sec
+watchdog.disk.interval = 5
+
+## Threshold disk use% - default:85%
+watchdog.disk.threshold_disk_use = 80
+
+## Threshold disk util% - default:100%
+watchdog.disk.threshold_disk_util = 100
+
+## Threshold disk read kb/sec - default:131072(KB)
+watchdog.disk.threshold_disk_rkb = 131072
+
+## Threshold disk write kb/sec - default:65536(KB)
+watchdog.disk.threshold_disk_wkb = 131072
+
+##
+## Watchdog.Cluster
+##
+## Is cluster-watchdog enabled - default:false
+watchdog.cluster.is_enabled = true
+
+## cluster - watch interval - default:1sec
+watchdog.cluster.interval = 10
+
+
+## --------------------------------------------------------------------
+## STORAGE - Autonomic Operation
+## --------------------------------------------------------------------
+## [compaction] enable compaction? - default:false
+autonomic_op.compaction.is_enabled = true
+
+## [compaction] number of parallel procs - default:1
+## autonomic_op.compaction.parallel_procs = 1
+
+## [compaction] warning ratio of active size - default:70%
+## autonomic_op.compaction.warn_active_size_ratio = 70
+
+## [compaction] threshold ratio of active size - default:60%
+## autonomic_op.compaction.threshold_active_size_ratio = 60
+
+
+## --------------------------------------------------------------------
+## STORAGE - Data Compaction
+## --------------------------------------------------------------------
+## Limit of a number of procs to execute data-compaction in parallel
+##compaction.limit_num_of_compaction_procs = 4
+
+## Minimum value of compaction-proc waiting time/batch-proc(msec)
+##compaction.waiting_time_min = 100
+
+## Regular value of compaction-proc waiting time/batch-proc(msec)
+##compaction.waiting_time_regular = 300
+
+## Maximum value of compaction-proc waiting time/batch-proc(msec)
+##compaction.waiting_time_max = 1000
+
+## Step of compaction-proc waiting time(msec)
+##compaction.waiting_time_step = 100
+
+
+## Minimum compaction batch processes
+##compaction.batch_procs_min = 1000
+
+## Regular compaction batch processes
+##compaction.batch_procs_regular = 10000
+
+## Maximum compaction batch processes
+##compaction.batch_procs_max = 100000
+
+## Step compaction batch processes
+##compaction.batch_procs_step = 1000
+
+
+## --------------------------------------------------------------------
+## STORAGE - MQ
+## --------------------------------------------------------------------
+## MQ backend storage: [bitcask, leveldb] - default:bitcask
+## mq.backend_db = bitcask
+
+## A number of mq-server's processes
+## mq.num_of_mq_procs = 8
+
+##
+## [Number of batch processes of message]
+##
+## Minimum number of batch processes of message
+mq.num_of_batch_process_min = 100
+
+## Maximum number of batch processes of message
+mq.num_of_batch_process_max = 10000
+
+## Regular number of batch processes of message
+mq.num_of_batch_process_regular = 1000
+
+## Step number of batch processes of message
+mq.num_of_batch_process_step = 100
+
+
+## --------------------------------------------------------------------
+## STORAGE - Replication/Recovery object(s)
+## --------------------------------------------------------------------
+## Rack-id for the rack-awareness replica placement
+## replication.rack_awareness.rack_id =
+
+## Size of stacked objects (bytes)
+## replication.recovery.size_of_stacked_objs = 67108864
+
+## Stacking timeout (msec)
+## replication.recovery.stacking_timeout = 5000
+
+
+## --------------------------------------------------------------------
+## STORAGE - Log
+## --------------------------------------------------------------------
+## Log level: [0:debug, 1:info, 2:warn, 3:error]
+log.log_level = 0
+
+## Output log file(s) - Erlang's log
+## log.erlang = ./log/erlang
+
+## Output log file(s) - app
+## log.app = ./log/app
+
+## Output log file(s) - members of storage-cluster
+## log.member_dir = ./log/ring
+
+## Output log file(s) - ring
+## log.ring_dir = ./log/ring
+
+## Output data-diagnosis log
+log.is_enable_diagnosis_log = true
+
+
+## --------------------------------------------------------------------
+## STORAGE - Other Directories
+## --------------------------------------------------------------------
+## Directory of queue for monitoring "RING"
+## queue_dir = ./work/queue
+
+## Directory of SNMP agent configuration
+snmp_agent = ./snmp/snmpa_storage_3/LEO-STORAGE
+
+
+## --------------------------------------------------------------------
+## Other Libs
+## --------------------------------------------------------------------
+## MQ backend storage: [bitcask, leveldb]
+## leo_mq.backend_db = bitcask
+
+## Enable strict check between checksum of a metadata and checksum of an object
+## leo_object_storage.is_strict_check = false
+
+## Metadata Storage: [bitcask, leveldb]
+leo_object_storage.metadata_storage = leveldb
+
+## Send after interval
+## leo_ordning_reda.send_after_interval = 100
+
+
+## --------------------------------------------------------------------
+## RPC
+## --------------------------------------------------------------------
+## RPC-Server's acceptors
+rpc.server.acceptors = 16
+
+## RPC-Server's listening port number
+rpc.server.listen_port = 13100
+
+## RPC-Server's listening timeout
+rpc.server.listen_timeout = 30000
+
+## RPC-Client's size of connection pool
+rpc.client.connection_pool_size = 16
+
+## RPC-Client's size of connection buffer
+rpc.client.connection_buffer_size = 16
+
+
+## --------------------------------------------------------------------
+## Profiling
+## --------------------------------------------------------------------
+## Enable profiler - leo_backend_db
+## leo_backend_db.profile = false
+
+## Enable profiler - leo_logger
+## leo_logger.profile = false
+
+## Enable profiler - leo_mq
+## leo_mq.profile = false
+
+## Enable profiler - leo_object_storage
+## leo_object_storage.profile = false
+
+## Enable profiler - leo_ordning_reda
+## leo_ordning_reda.profile = false
+
+## Enable profiler - leo_redundant_manager
+## leo_redundant_manager.profile = false
+
+## Enable profiler - leo_rpc
+## leo_rpc.profile = false
+
+## Enable profiler - leo_statistics
+## leo_statistics.profile = false
+
+
+## --------------------------------------------------------------------
+## MANAGER - Mnesia
+## * Store the info of the storage-cluster and the info of gateway(s)
+## * Store the RING and the command histories
+## --------------------------------------------------------------------
+## The write threshold for transaction log dumps
+## as the number of writes to the transaction log
+mnesia.dump_log_write_threshold = 50000
+
+## Controls how often disc_copies tables are dumped from memory
+mnesia.dc_dump_limit = 40
+
+
+#======================================================================
+# For vm.args
+#======================================================================
+## Name of the leofs-storage node
+nodename = storage_13@127.0.0.1
+
+## Cookie for distributed node communication. All nodes in the same cluster
+## should use the same cookie or they will not be able to communicate.
+distributed_cookie = leofs_c2
+
+## Enable kernel poll
+erlang.kernel_poll = true
+
+## Number of async threads
+erlang.asyc_threads = 32
+
+## Increase number of concurrent ports/sockets
+erlang.max_ports = 64000
+
+## Set the location of crash dumps
+erlang.crash_dump = ./log/erl_crash.dump
+
+## Raise the ETS table limit
+erlang.max_ets_tables = 256000
+
+## Raise the default erlang process limit
+process_limit = 1048576
+
+## Path of SNMP-agent configuration
+snmp_conf = ./snmp/snmpa_storage_3/leo_storage_snmp
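How the pieces fit together: `mdc_replication.num_of_replicas_a_dc = 2` is stamped into each stacked object on the sending side, and the receiving cluster replicates by that embedded value rather than by its own `consistency.num_of_replicas`; the debug loop deleted from `leo_sync_remote_cluster:replicate_1/2` sat exactly at that point. A condensed sketch of the surviving logic (not a verbatim excerpt; the error branch is simplified):

```erlang
%% Sketch (condensed from replicate_1/2 in leo_sync_remote_cluster.erl):
%% stamp the per-DC replica count into the object before replication,
%% so the remote consumer honors the sender's replication method.
replicate_1({ok, MDCR_N}, Object) ->
    Object_1 = Object#?OBJECT{num_of_replicas = MDCR_N},
    case leo_storage_handler_object:replicate(Object_1) of
        {ok,_ETag} ->
            {ok, leo_object_storage_transformer:object_to_metadata(Object_1)};
        Error ->
            Error
    end.
```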