Setting Up Ceph 17 (Quincy) on Ubuntu 22.04


This lab is based on VMware 17.

Node       IP
storage01  192.168.200.161
storage02  192.168.200.162
storage03  192.168.200.163

Environment Initialization

Initialize the base environment; run the following script on all three nodes:

#!/bin/bash

# Node definitions: IP, hostname, SSH user
NODES=("192.168.200.161 storage01 root" "192.168.200.162 storage02 root" "192.168.200.163 storage03 root")

# Password for the nodes (a single shared cluster password is assumed)
HOST_PASS="000000"

# Node that acts as the time-sync source
TIME_SERVER=storage01

# Network segment allowed to sync time from it
TIME_SERVER_IP=192.168.200.0/24

# Login banner (message of the day)
cat > /etc/motd <<EOF
 ################################
 #    Welcome  to  openstack    #
 ################################
EOF

# Set the hostname on the node whose IP matches
for node in "${NODES[@]}"; do
  ip=$(echo "$node" | awk '{print $1}')
  hostname=$(echo "$node" | awk '{print $2}')

  # Get this node's primary IP and current hostname
  current_ip=$(hostname -I | awk '{print $1}')
  current_hostname=$(hostname)

  # Update the hostname if this node matches the entry and the name differs
  if [[ "$current_ip" == "$ip" && "$current_hostname" != "$hostname" ]]; then
    echo "Updating hostname to $hostname on $current_ip..."
    hostnamectl set-hostname "$hostname"

    if [ $? -eq 0 ]; then
      echo "Hostname updated successfully."
    else
      echo "Failed to update hostname."
    fi

    break
  fi
done

# Add every node's entry to the hosts file
for node in "${NODES[@]}"; do
  ip=$(echo "$node" | awk '{print $1}')
  hostname=$(echo "$node" | awk '{print $2}')

  # Skip entries that already exist in /etc/hosts
  if grep -q "$ip $hostname" /etc/hosts; then
    echo "Host entry for $hostname already exists in /etc/hosts."
  else
    # Append the node's entry to /etc/hosts
    sudo sh -c "echo '$ip $hostname' >> /etc/hosts"
    echo "Added host entry for $hostname in /etc/hosts."
  fi
done

# Generate an SSH key pair if one does not exist yet
if [[ ! -s ~/.ssh/id_rsa.pub ]]; then
    ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa -q -b 2048
fi

# Install sshpass if it is not already present
if ! which sshpass &> /dev/null; then
    echo "sshpass 工具未安装,正在安装 sshpass..."
    sudo apt-get install -y sshpass
fi

# Distribute the SSH public key to every node for passwordless login
for node in "${NODES[@]}"; do
    ip=$(echo "$node" | awk '{print $1}')
    hostname=$(echo "$node" | awk '{print $2}')
    user=$(echo "$node" | awk '{print $3}')

    # Use sshpass to supply the password and accept the host key automatically
    sshpass -p "$HOST_PASS" ssh-copy-id -o StrictHostKeyChecking=no -i /root/.ssh/id_rsa.pub "$user@$hostname"
done

# Time synchronization
apt install -y chrony
if [[ $(hostname) == "$TIME_SERVER" ]]; then
    # Configure this node as the cluster's time source
    sed -i '20,23s/^/#/g' /etc/chrony/chrony.conf
    echo "allow $TIME_SERVER_IP" >> /etc/chrony/chrony.conf
    echo "local stratum 10" >> /etc/chrony/chrony.conf
else
    # Configure this node to sync from the time source
    sed -i '20,23s/^/#/g' /etc/chrony/chrony.conf
    echo "server $TIME_SERVER iburst" >> /etc/chrony/chrony.conf
fi

# Restart and enable the chrony service
systemctl restart chrony
systemctl enable chrony

echo "###############################################################"
echo "#################      集群初始化成功     #####################"
echo "###############################################################"

Deploying Ceph

Configure the offline APT repository

tar zxvf ceph_quincy.tar.gz -C /opt/

cp /etc/apt/sources.list{,.bak}

cat > /etc/apt/sources.list << EOF
deb [trusted=yes] file:// /opt/ceph_quincy/debs/
EOF

apt-get clean
apt-get update
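
Optionally verify that the packages now resolve from the local repository (assuming the offline bundle ships cephadm and ceph-common):

apt-cache policy cephadm ceph-common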

Configure time synchronization

# Enable NTP
timedatectl set-ntp true

# Set the timezone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai

# Sync the hardware clock to the system clock
hwclock --systohc
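
The timezone and clock state can be confirmed with the standard systemd and chrony tools:

timedatectl status
chronyc tracking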

Install Docker on all nodes

apt -y install docker-ce

Install cephadm and the Ceph CLI tools on the storage01 node

apt install -y cephadm ceph-common
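
A quick version check confirms both tools installed correctly (the exact version strings depend on the offline bundle):

cephadm version
ceph --version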

Load the Ceph container images on all nodes

docker load -i cephadm_images_v17.tar

Set up a local image registry on the storage01 node

# Load the registry image
docker load -i registry.tar

# Start the registry container
docker run -d --name registry -p 5000:5000 --restart always 3a0f7b0a13ef
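
Before pushing images, it is worth checking that the registry container is running and answering on the standard v2 API (the catalog will be empty at this point):

docker ps --filter name=registry
curl http://127.0.0.1:5000/v2/_catalog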

Configure the registry address on all nodes

cat > /etc/docker/daemon.json << EOF
{
"insecure-registries":["192.168.200.161:5000"]
}
EOF

systemctl daemon-reload
systemctl restart docker

Push the Ceph image to the registry from storage01

docker tag 0912465dcea5 192.168.200.161:5000/ceph:v17
docker push 192.168.200.161:5000/ceph:v17
cd /etc/ceph
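
After the push, the tag should be visible through the registry v2 API (the repository name matches the tag used above):

curl http://192.168.200.161:5000/v2/ceph/tags/list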

Bootstrap the cluster on storage01

cephadm --image 192.168.200.161:5000/ceph:v17 bootstrap --mon-ip 192.168.200.161 --initial-dashboard-user admin --initial-dashboard-password 000000 --skip-pull
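
The bootstrap output ends with the dashboard URL and the admin credentials. The initial mon and mgr daemons can be checked right away with the standard CLI:

ceph -s
ceph orch ps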

Change the dashboard HTTPS port (everything from here down to the module restart is optional)

ceph config set mgr mgr/dashboard/ssl_server_port 5050

Disable SSL for the dashboard

ceph config set mgr mgr/dashboard/ssl false 

Set the dashboard listen address

ceph config set mgr mgr/dashboard/server_addr 0.0.0.0

Set the dashboard listen port

ceph config set mgr mgr/dashboard/server_port 5050

Restart the dashboard module so the changes take effect (end of the optional steps)

ceph mgr module disable dashboard

ceph mgr module enable dashboard
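
The active dashboard endpoint (address and port) can then be confirmed with:

ceph mgr services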

Add the remaining hosts to the cluster

ssh-copy-id -f -i /etc/ceph/ceph.pub storage02

ssh-copy-id -f -i /etc/ceph/ceph.pub storage03
ceph orch host add storage02

ceph orch host add storage03
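
Verify that all three hosts are now managed by cephadm:

ceph orch host ls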

Check the cluster

root@storage01:/etc/ceph# ceph -s
  cluster:
    id:     4569c748-fc81-11ee-872a-7f1819cf2453
    health: HEALTH_WARN
            1 stray daemon(s) not managed by cephadm
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 2 daemons, quorum storage02,storage03 (age 10m)
    mgr: storage03.lnyuay(active, since 68s)
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:

root@storage01:/etc/ceph#

Dashboard access: https://<IP>:8443 by default (or http://<IP>:5050 if the optional port/SSL changes above were applied)

If something goes wrong, the cluster can be removed and redeployed:

cephadm rm-cluster --fsid d92b85c0-3ecd-11ed-a617-3f7cf3e2d6d8 --force

List the available disk devices

ceph orch device ls
root@storage01:/etc/ceph# ceph orch device ls
HOST       PATH      TYPE  DEVICE ID   SIZE  AVAILABLE  REFRESHED  REJECT REASONS
storage01  /dev/sdb  hdd               107G  Yes        19m ago
storage02  /dev/sdb  hdd               107G  Yes        12m ago
storage03  /dev/sdb  hdd               107G  Yes        12m ago

Create the OSDs

ceph orch daemon add osd storage01:/dev/sdb
ceph orch daemon add osd storage02:/dev/sdb
ceph orch daemon add osd storage03:/dev/sdb
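
After the OSDs come up, the CRUSH tree and per-OSD usage can be inspected with:

ceph osd tree
ceph osd df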

Verify

root@storage01:/etc/ceph# ceph -s
  cluster:
    id:     4569c748-fc81-11ee-872a-7f1819cf2453
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum storage03,storage01,storage02 (age 54s)
    mgr: storage01.gitwte(active, since 110s)
    osd: 3 osds: 3 up (since 5m), 3 in (since 5m)

  data:
    pools:   1 pools, 1 pgs
    objects: 2 objects, 449 KiB
    usage:   62 MiB used, 300 GiB / 300 GiB avail
    pgs:     1 active+clean
    
root@storage01:/etc/ceph# ceph df
--- RAW STORAGE ---
CLASS     SIZE    AVAIL    USED  RAW USED  %RAW USED
hdd    300 GiB  300 GiB  62 MiB    62 MiB       0.02
TOTAL  300 GiB  300 GiB  62 MiB    62 MiB       0.02

--- POOLS ---
POOL  ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
.mgr   1    1  449 KiB        2  1.3 MiB      0     95 GiB
root@storage01:/etc/ceph#

Deploy CephFS

CephFS needs two pools, cephfs-data and cephfs-metadata, which store the file data and the file metadata respectively. Create the pools, create the filesystem, and deploy the MDS daemons:

ceph osd pool create cephfs-metadata 16 16

ceph osd pool create cephfs-data 32 32

ceph fs new cephfs cephfs-metadata cephfs-data

ceph orch apply mds cephfs --placement="3 storage01 storage02 storage03"

root@storage01:/etc/ceph# ceph -s
  cluster:
    id:     4569c748-fc81-11ee-872a-7f1819cf2453
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum storage03,storage02,storage01 (age 10s)
    mgr: storage01.gitwte(active, since 10m)
    mds: 1/1 daemons up, 2 standby
    osd: 3 osds: 3 up (since 14m), 3 in (since 14m)
    rgw: 3 daemons active (3 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   7 pools, 177 pgs
    objects: 216 objects, 457 KiB
    usage:   104 MiB used, 300 GiB / 300 GiB avail
    pgs:     177 active+clean

root@storage01:/etc/ceph#
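
As a hedged usage example, the new filesystem can be mounted with the kernel CephFS client from any node that has /etc/ceph/ceph.conf and the admin keyring (the mount point is arbitrary; in production a dedicated cephx user would normally be created instead of using client.admin):

mkdir -p /mnt/cephfs
# Kernel mount against the monitor on storage01, authenticating as client.admin
mount -t ceph storage01:6789:/ /mnt/cephfs -o name=admin,secret=$(ceph auth get-key client.admin)
df -h /mnt/cephfs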

Object storage (RGW)

ceph orch apply rgw myorg cn-east-1 --placement="3 storage01 storage02 storage03"
root@storage01:/etc/ceph# ceph orch ls
NAME                       PORTS        RUNNING  REFRESHED  AGE  PLACEMENT
alertmanager               ?:9093,9094      1/1  47s ago    36m  count:1
crash                                       3/3  3m ago     36m  *
grafana                    ?:3000           1/1  47s ago    36m  count:1
mds.cephfs                                  3/3  3m ago     5m   storage01;storage02;storage03;count:3
mgr                                         1/1  47s ago    11m  storage01
mon                                         3/1  3m ago     55s  storage01
node-exporter              ?:9100           3/3  3m ago     36m  *
osd.all-available-devices                     3  3m ago     15m  *
prometheus                 ?:9095           1/1  47s ago    36m  count:1
rgw.myorg                  ?:80             3/3  3m ago     4m   storage01;storage02;storage03;count:3
root@storage01:/etc/ceph#
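
To exercise the gateway end to end, an S3 user can be created and the RGW endpoint probed on port 80 (the user name here is just an example):

radosgw-admin user create --uid=testuser --display-name="Test User"
curl http://storage01:80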
