Table of Contents
1. Working directory
2. Deploying CephFS
3. Mounting the CephFS filesystem
4. Configuring the client
5. Mounting the CephFS filesystem
6. Data verification
root@cephadm-deploy:~# cephadm shell
Inferring fsid 0888a64c-57e6-11ec-ad21-fbe9db6e2e74
Using recent ceph image quay.io/ceph/ceph@sha256:bb6a71f7f481985f6d3b358e3b9ef64c6755b3db5aa53198e0aac38be5c8ae54
root@cephadm-deploy:/#
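cephadm can also execute a single command inside that container without opening an interactive shell, which is handy for quick checks; a minimal sketch:

cephadm shell -- ceph -s        # run one command in the ceph container and exit
cephadm shell -- ceph orch ls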
Official documentation:
https://docs.ceph.com/en/pacific/cephadm/services/mds/
https://docs.ceph.com/en/pacific/cephfs/#getting-started-with-cephfs
root@cephadm-deploy:/# ceph fs ls
No filesystems enabled
root@cephadm-deploy:/# ceph osd pool ls
device_health_metrics
root@cephadm-deploy:/# ceph fs volume create wgs_cephfs --placement=3
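ceph fs volume create bundles pool creation, fs new, and MDS deployment into one step. A rough sketch of the manual equivalent (the pool names below are illustrative; the volume command itself generates the cephfs.wgs_cephfs.meta/.data pools seen next):

ceph osd pool create wgs_cephfs_metadata        # metadata pool (illustrative name)
ceph osd pool create wgs_cephfs_data            # data pool (illustrative name)
ceph fs new wgs_cephfs wgs_cephfs_metadata wgs_cephfs_data
ceph orch apply mds wgs_cephfs --placement=3    # what --placement=3 does above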
root@cephadm-deploy:/# ceph osd pool ls
device_health_metrics
cephfs.wgs_cephfs.meta
cephfs.wgs_cephfs.data
root@cephadm-deploy:/# ceph fs ls
name: wgs_cephfs, metadata pool: cephfs.wgs_cephfs.meta, data pools: [cephfs.wgs_cephfs.data ]
root@cephadm-deploy:/# ceph mds stat
wgs_cephfs:1 {0=wgs_cephfs.cephadm-deploy.ztpmlk=up:active} 2 up:standby
root@cephadm-deploy:/# ceph orch ls
NAME                       PORTS        RUNNING  REFRESHED  AGE  PLACEMENT
alertmanager               ?:9093,9094      1/1  3m ago     8h   count:1
crash                                        5/5  9m ago     8h   *
grafana                    ?:3000           1/1  3m ago     8h   count:1
mds.wgs_cephfs                              3/3  3m ago     3m   count:3
mgr                                          2/2  3m ago     8h   count:2
mon                                          5/5  9m ago     8h   count:5
node-exporter              ?:9100           5/5  9m ago     8h   *
osd                                            1  3m ago     -    <unmanaged>
osd.all-available-devices                     14  9m ago     6h   *
prometheus                 ?:9095           1/1  3m ago     8h   count:1
root@cephadm-deploy:/# ceph fs status wgs_cephfs
wgs_cephfs - 0 clients
==========
RANK  STATE   MDS                               ACTIVITY     DNS    INOS   DIRS   CAPS
 0    active  wgs_cephfs.cephadm-deploy.ztpmlk  Reqs:    0 /s    10     13     12      0
          POOL              TYPE     USED  AVAIL
cephfs.wgs_cephfs.meta    metadata  96.0k  94.9G
cephfs.wgs_cephfs.data      data       0   94.9G
          STANDBY MDS
wgs_cephfs.ceph-node02.zpdphv
wgs_cephfs.ceph-node01.ellktv
MDS version: ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)
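Three MDS daemons were deployed, but max_mds defaults to 1, so one is active and two remain standby. If a second active rank or a hot standby were wanted, the filesystem settings could be raised, for example:

ceph fs set wgs_cephfs max_mds 2                    # allow two active MDS ranks
ceph fs set wgs_cephfs allow_standby_replay true    # standby follows the active MDS journal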
root@cephadm-deploy:/# ceph -s
  cluster:
    id:     0888a64c-57e6-11ec-ad21-fbe9db6e2e74
    health: HEALTH_OK

  services:
    mon: 5 daemons, quorum cephadm-deploy,ceph-node01,ceph-node02,ceph-node03,ceph-node04 (age 35m)
    mgr: cephadm-deploy.jgiulj(active, since 64m), standbys: ceph-node01.anwvfy
    mds: 1/1 daemons up, 2 standby
    osd: 15 osds: 15 up (since 35m), 15 in (since 35m)

  data:
    volumes: 1/1 healthy
    pools:   3 pools, 65 pgs
    objects: 22 objects, 2.3 KiB
    usage:   107 MiB used, 300 GiB / 300 GiB avail
    pgs:     65 active+clean
root@cephadm-deploy:/# ceph auth add client.wgs mon 'allow rw' mds 'allow rw' osd 'allow rwx pool=cephfs.wgs_cephfs.data'
added key for client.wgs
root@cephadm-deploy:/# ceph auth get client.wgs
[client.wgs]
        key = AQAdtrBhxOX9BhAAbJtiqOdNrOAE/BmZc1mlyw==
        caps mds = "allow rw"
        caps mon = "allow rw"
        caps osd = "allow rwx pool=cephfs.wgs_cephfs.data"
exported keyring for client.wgs
root@cephadm-deploy:/# ceph auth get client.wgs -o ceph.client.wgs.keyring
exported keyring for client.wgs
root@cephadm-deploy:/# cat ceph.client.wgs.keyring
[client.wgs]
        key = AQAdtrBhxOX9BhAAbJtiqOdNrOAE/BmZc1mlyw==
        caps mds = "allow rw"
        caps mon = "allow rw"
        caps osd = "allow rwx pool=cephfs.wgs_cephfs.data"
root@cephadm-deploy:/# ceph auth print-key client.wgs > wgs.key
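Instead of composing the caps by hand with ceph auth add, ceph fs authorize can generate a key already scoped to the filesystem; a sketch with the same client name (it assumes client.wgs does not already exist with different caps):

ceph fs authorize wgs_cephfs client.wgs / rw > ceph.client.wgs.keyring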
root@ceph-client01:~# wget -q -O- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
OK
root@ceph-client01:~# echo "deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific $(lsb_release -cs) main" >> /etc/apt/sources.list
root@ceph-client01:~# apt -y update && apt -y upgrade
root@ceph-client01:~# apt -y install ceph-common
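On newer Debian/Ubuntu releases apt-key is deprecated; a keyring-file based setup along these lines should behave the same (the keyring path is an arbitrary choice):

wget -qO- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | gpg --dearmor -o /usr/share/keyrings/ceph.gpg
echo "deb [signed-by=/usr/share/keyrings/ceph.gpg] https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific $(lsb_release -cs) main" > /etc/apt/sources.list.d/ceph.list
apt update && apt -y install ceph-common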
root@cephadm-deploy:/# scp ceph.client.wgs.keyring wgs.key /etc/ceph/ceph.conf 192.168.174.121:/etc/ceph
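If shipping the full cluster ceph.conf to clients is undesirable, a minimal client config can be generated on the admin node first; a sketch using the same client address:

ceph config generate-minimal-conf > /tmp/ceph.conf    # only [global] with fsid and mon_host
scp /tmp/ceph.conf ceph.client.wgs.keyring wgs.key 192.168.174.121:/etc/ceph/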
root@ceph-client01:~# ceph --id wgs -s
  cluster:
    id:     0888a64c-57e6-11ec-ad21-fbe9db6e2e74
    health: HEALTH_OK

  services:
    mon: 5 daemons, quorum cephadm-deploy,ceph-node01,ceph-node02,ceph-node03,ceph-node04 (age 58m)
    mgr: cephadm-deploy.jgiulj(active, since 86m), standbys: ceph-node01.anwvfy
    mds: 1/1 daemons up, 2 standby
    osd: 15 osds: 15 up (since 58m), 15 in (since 58m)

  data:
    volumes: 1/1 healthy
    pools:   3 pools, 65 pgs
    objects: 22 objects, 2.3 KiB
    usage:   107 MiB used, 300 GiB / 300 GiB avail
    pgs:     65 active+clean
root@ceph-client01:~# stat /sbin/mount.ceph
  File: /sbin/mount.ceph
  Size: 190888          Blocks: 376        IO Block: 4096   regular file
Device: fd00h/64768d    Inode: 273881      Links: 1
Access: (0755/-rwxr-xr-x)  Uid: (    0/    root)   Gid: (    0/    root)
Access: 2021-12-08 21:55:01.000000000 +0800
Modify: 2021-09-06 16:41:31.000000000 +0800
Change: 2021-12-08 21:55:05.278599892 +0800
 Birth: -
root@ceph-client01:~# mkdir /data/cephfs-data -pv
root@ceph-client01:~# mount -t ceph 192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/ /data/cephfs-data -o name=wgs,secretfile=/etc/ceph/wgs.key
root@ceph-client01:~# df -TH
Filesystem                                                        Type      Size  Used Avail Use% Mounted on
udev                                                              devtmpfs  982M     0  982M   0% /dev
tmpfs                                                             tmpfs     206M  2.0M  204M   1% /run
/dev/mapper/ubuntu--vg-ubuntu--lv                                 ext4       20G  9.5G  9.5G  51% /
tmpfs                                                             tmpfs     1.1G     0  1.1G   0% /dev/shm
tmpfs                                                             tmpfs     5.3M     0  5.3M   0% /run/lock
tmpfs                                                             tmpfs     1.1G     0  1.1G   0% /sys/fs/cgroup
192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/  ceph      102G     0  102G   0% /data/cephfs-data
root@ceph-client01:~# stat -f /data/cephfs-data/
  File: "/data/cephfs-data/"
    ID: a55da9a5983f888c Namelen: 255     Type: ceph
Block size: 4194304    Fundamental block size: 4194304
Blocks: Total: 24306      Free: 24306      Available: 24306
Inodes: Total: 0          Free: -1
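If the cluster ever hosts more than one CephFS, the target filesystem can be named explicitly, and the FUSE client is an alternative to the kernel mount; a sketch (fs= needs a reasonably recent kernel/mount.ceph, older ones use mds_namespace=, and ceph-fuse comes from the separate ceph-fuse package, which was not installed above):

mount -t ceph 192.168.174.120:6789:/ /data/cephfs-data -o name=wgs,secretfile=/etc/ceph/wgs.key,fs=wgs_cephfs
ceph-fuse --id wgs /data/cephfs-data    # userspace alternative to the kernel client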
root@cephadm-deploy:~# ceph fs status wgs_cephfs
wgs_cephfs - 1 clients    # one client is now connected
==========
RANK  STATE   MDS                               ACTIVITY     DNS    INOS   DIRS   CAPS
 0    active  wgs_cephfs.cephadm-deploy.ztpmlk  Reqs:    0 /s    10     13     12      1
          POOL              TYPE     USED  AVAIL
cephfs.wgs_cephfs.meta    metadata  96.0k  94.9G
cephfs.wgs_cephfs.data      data       0   94.9G
          STANDBY MDS
wgs_cephfs.ceph-node02.zpdphv
wgs_cephfs.ceph-node01.ellktv
MDS version: ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)
root@ceph-client01:~# cat /etc/fstab
192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/ /data/cephfs-data ceph name=wgs,secretfile=/etc/ceph/wgs.key,_netdev 0 0
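To confirm the fstab entry works without rebooting, the manual mount can be dropped and everything remounted from /etc/fstab:

umount /data/cephfs-data
mount -a
df -Th /data/cephfs-data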
root@ceph-client01:~# mkdir -pv /data/cephfs-data01
mkdir: created directory '/data/cephfs-data01'
root@ceph-client01:~# mkdir -pv /data/cephfs-data02
mkdir: created directory '/data/cephfs-data02'
root@ceph-client01:~# mount -t ceph 192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/ /data/cephfs-data01 -o name=wgs,secretfile=/etc/ceph/wgs.key
root@ceph-client01:~# mount -t ceph 192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/ /data/cephfs-data02 -o name=wgs,secretfile=/etc/ceph/wgs.key
root@ceph-client01:~# df -TH
Filesystem                                                        Type      Size  Used Avail Use% Mounted on
udev                                                              devtmpfs  982M     0  982M   0% /dev
tmpfs                                                             tmpfs     206M  2.0M  204M   1% /run
/dev/mapper/ubuntu--vg-ubuntu--lv                                 ext4       20G  9.5G  9.5G  51% /
tmpfs                                                             tmpfs     1.1G     0  1.1G   0% /dev/shm
tmpfs                                                             tmpfs     5.3M     0  5.3M   0% /run/lock
tmpfs                                                             tmpfs     1.1G     0  1.1G   0% /sys/fs/cgroup
192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/  ceph      102G     0  102G   0% /data/cephfs-data01
192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/  ceph      102G     0  102G   0% /data/cephfs-data02
root@ceph-client01:~# echo "/data/cephfs-data01" >> /data/cephfs-data01/data01.txt
root@ceph-client01:~# echo "/data/cephfs-data02" >> /data/cephfs-data02/data02.txt
root@ceph-client01:~# ls -l /data/cephfs-data01/
total 1
-rw-r--r-- 1 root root 20 Dec  8 22:17 data01.txt
-rw-r--r-- 1 root root 20 Dec  8 22:18 data02.txt
root@ceph-client01:~# ls -l /data/cephfs-data02/
total 1
-rw-r--r-- 1 root root 20 Dec  8 22:17 data01.txt
-rw-r--r-- 1 root root 20 Dec  8 22:18 data02.txt
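Both directories are the same CephFS root mounted twice, so a write through one mountpoint is immediately visible through the other; reading the files back through the opposite mount would complete the check, e.g.:

cat /data/cephfs-data02/data01.txt    # expected content: /data/cephfs-data01
cat /data/cephfs-data01/data02.txt    # expected content: /data/cephfs-data02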