    health: HEALTH_WARN
            1 daemons have recently crashed
This HEALTH_WARN is raised by the mgr crash module's RECENT_CRASH check: one or more daemon crashes were recorded recently and have not been acknowledged yet. The fix is to list the new crash reports, inspect them, and archive them, as follows:
[root@node1 data-rbd0]# ceph crash ls-new
ID                                                               ENTITY           NEW
2020-11-04_02:41:35.597537Z_a805ac69-6d0e-410b-8bd6-8e69b49ace2e client.rgw.node1  *
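The crash module keeps every report until it is explicitly removed, so ls-new only shows reports that have not been archived yet. Two related queries, sketched here as they exist in Nautilus (not part of the session above):

ceph crash ls      # list all saved crash reports, archived ones included
ceph crash stat    # summarize how many reports are saved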
[root@node1 data-rbd0]# ceph crash info 2020-11-04_02:41:35.597537Z_a805ac69-6d0e-410b-8bd6-8e69b49ace2e
{
    "os_version_id": "7",
    "utsname_release": "3.10.0-1062.el7.x86_64",
    "os_name": "CentOS Linux",
    "entity_name": "client.rgw.node1",
    "timestamp": "2020-11-04 02:41:35.597537Z",
    "process_name": "radosgw",
    "utsname_machine": "x86_64",
    "utsname_sysname": "Linux",
    "os_version": "7 (Core)",
    "os_id": "centos",
    "utsname_version": "#1 SMP Wed Aug 7 18:08:02 UTC 2019",
    "backtrace": [
        "(()+0xf5f0) [0x7f5da58375f0]",
        "(gsignal()+0x37) [0x7f5da4c71337]",
        "(abort()+0x148) [0x7f5da4c72a28]",
        "(__gnu_cxx::__verbose_terminate_handler()+0x165) [0x7f5da55817d5]",
        "(()+0x5e746) [0x7f5da557f746]",
        "(()+0x5e773) [0x7f5da557f773]",
        "(()+0x5e993) [0x7f5da557f993]",
        "(()+0x178bb) [0x7f5db17928bb]",
        "(tcmalloc::allocate_full_cpp_throw_oom(unsigned long)+0xf3) [0x7f5db17b0b83]",
        "(std::string::_Rep::_S_create(unsigned long, unsigned long, std::allocator<char> const&)+0x59) [0x555f0e3533f9]",
        "(std::string::_Rep::_M_clone(std::allocator<char> const&, unsigned long)+0x1b) [0x555f0e35343b]",
        "(RGWObjectExpirer::process_single_shard(std::string const&, utime_t const&, utime_t const&)+0xf0) [0x555f0e81c9b0]",
        "(RGWObjectExpirer::inspect_all_shards(utime_t const&, utime_t const&)+0x174) [0x555f0e81d074]",
        "(RGWObjectExpirer::OEWorker::entry()+0x275) [0x555f0e81d365]",
        "(()+0x7e65) [0x7f5da582fe65]",
        "(clone()+0x6d) [0x7f5da4d3988d]"
    ],
    "utsname_hostname": "node1",
    "crash_id": "2020-11-04_02:41:35.597537Z_a805ac69-6d0e-410b-8bd6-8e69b49ace2e",
    "ceph_version": "14.2.12"
}
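Reading the backtrace from the bottom up: the radosgw object-expirer thread (RGWObjectExpirer::OEWorker) died inside tcmalloc::allocate_full_cpp_throw_oom, meaning a std::string allocation failed and the resulting exception aborted the process. That points at transient memory pressure on the node rather than any on-disk damage, so once the daemon is running again it is reasonable to simply acknowledge ("archive") the report: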
[root@node1 data-rbd0]# ceph crash archive 2020-11-04_02:41:35.597537Z_a805ac69-6d0e-410b-8bd6-8e69b49ace2e
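Archiving only acknowledges the crash: the report disappears from ls-new and from the RECENT_CRASH health check, but it stays visible in ceph crash ls. When there are several reports, or old ones to clean up, the crash module also offers the following (a sketch; <crash-id> is a placeholder):

ceph crash archive-all      # acknowledge every new crash at once
ceph crash rm <crash-id>    # delete a single report entirely
ceph crash prune 7          # discard reports older than 7 days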
[root@node1 data-rbd0]# ceph -s
  cluster:
    id:     57e0373d-3f5e-453f-a172-3004586b3a54
    health: HEALTH_OK

  services:
    mon: 4 daemons, quorum node1,node2,node3,node4 (age 28h)
    mgr: node2(active, since 3d), standbys: node3, node1
    mds: cephfs-demo:1 {0=node2=up:active} 1 up:standby
    osd: 5 osds: 5 up (since 19m), 5 in (since 18m)
    rgw: 3 daemons active (node1, node2, node3)

  task status:
    scrub status:
        mds.node2: idle

  data:
    pools:   10 pools, 352 pgs
    objects: 1.35k objects, 2.0 GiB
    usage:   11 GiB used, 69 GiB / 80 GiB avail
    pgs:     352 active+clean

[root@node1 data-rbd0]# ceph health
HEALTH_OK
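With the report archived, the health check clears immediately. If benign recurring crashes keep re-triggering the warning, the window the check looks at can also be tuned; a sketch, assuming the Nautilus crash module's warn_recent_interval option (value in seconds; the default is two weeks):

ceph config set mgr mgr/crash/warn_recent_interval 86400    # only warn about crashes from the last day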