ceph osd pool ls detail
pool 2 'cephfs_metadata' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 128 pgp_num 128 autoscale_mode on last_change 4051 lfor 0/0/3797 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 16 recovery_priority 5 target_size_ratio 0.8 application cephfs
pool 3 'cephfs_data' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 3736 lfor 0/3266/3582 flags hashpspool stripe_width 0 target_size_ratio 0.8 application cephfs
pool 4 'rbd' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 4156 lfor 0/4156/4154 flags hashpspool,selfmanaged_snaps stripe_width 0 target_size_ratio 0.8 application rbd
	removed_snaps [1~3]
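Per-pool settings shown in this listing, such as target_size_ratio (the capacity fraction the PG autoscaler assumes when sizing pg_num), can be read back and changed with the standard ceph osd pool get/set subcommands. A minimal sketch, reusing the pool names from the output above:

# Show the autoscaler's view of each pool (current vs. target usage)
ceph osd pool autoscale-status

# Change the capacity fraction the autoscaler assumes for a pool
ceph osd pool set cephfs_data target_size_ratio 0.8

# Read a single setting back to confirm
ceph osd pool get cephfs_data target_size_ratio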