Hi,
is it a problem that the device class for all my disks is SSD even though all
of these disks are NVMe disks? If it is just a classification for Ceph, so
that I can have pools on SSDs and NVMe disks separated, I don't care. But
maybe Ceph handles NVMe disks differently internally?
I've added them via
ceph-volume lvm create --bluestore --data /dev/nvme2n1
and they only show up as ssd:
root@a0423f621aaa:~# ceph osd metadata osd.0
{
"id": 0,
"arch": "x86_64",
...
"bluefs": "1",
"bluefs_dedicated_db": "0",
"bluefs_dedicated_wal": "0",
"bluefs_single_shared_device": "1",
"bluestore_bdev_access_mode": "blk",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_dev_node": "/dev/dm-2",
"bluestore_bdev_devices": "nvme0n1",
"bluestore_bdev_driver": "KernelDevice",
"bluestore_bdev_partition_path": "/dev/dm-2",
"bluestore_bdev_rotational": "0",
"bluestore_bdev_size": "1920378863616",
"bluestore_bdev_support_discard": "1",
"bluestore_bdev_type": "ssd",
"ceph_release": "pacific",
"ceph_version": "ceph version 16.2.13
(5378749ba6be3a0868b51803968ee9cde4833a3e) pacific (stable)",
"ceph_version_short": "16.2.13",
"ceph_version_when_created": "ceph version 16.2.13
(5378749ba6be3a0868b51803968ee9cde4833a3e) pacific (stable)",
"cpu": "Intel(R) Xeon(R) Gold 6226R CPU @ 2.90GHz",
"created_at": "2023-06-20T14:03:35.167741Z",
"default_device_class": "ssd",
"device_ids":
"nvme0n1=SAMSUNG_MZQLB1T9HAJR-00007_S439NF0M506164",
"device_paths":
"nvme0n1=/dev/disk/by-path/pci-0000:5e:00.0-nvme-1",
"devices": "nvme0n1",
"distro": "ubuntu",
"distro_description": "Ubuntu 20.04.6 LTS",
"distro_version": "20.04",
...
"journal_rotational": "0",
"kernel_description": "#169-Ubuntu SMP Tue Jun 6 22:23:09 UTC
2023",
"kernel_version": "5.4.0-152-generic",
"mem_swap_kb": "0",
"mem_total_kb": "196668116",
"network_numa_unknown_ifaces": "back_iface,front_iface",
"objectstore_numa_node": "0",
"objectstore_numa_nodes": "0",
"os": "Linux",
"osd_data": "/var/lib/ceph/osd/ceph-0",
"osd_objectstore": "bluestore",
"osdspec_affinity": "",
"rotational": "0"
}
Cheers
Boris