1. Overview
For RGW multisite, a realm contains one master zonegroup and one or more secondary zonegroups (multiple independent zonegroups can also be configured).
Within a zonegroup, one master zone and one or more secondary zones are configured.
In this walkthrough we set up one realm containing a single master zonegroup, with Ceph cluster 1 as the master zone and Ceph cluster 2 as the secondary zone.
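Once configured, the realm/zonegroup/zone hierarchy can be inspected from any node with radosgw-admin; a quick sketch:
# list the configured entities, from realm down to zone
radosgw-admin realm list
radosgw-admin zonegroup list
radosgw-admin zone list
# the period records the committed multisite configuration
radosgw-admin period get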
2. Configuration procedure
Configure the master zone:
Create the pools
#!/bin/bash
ceph osd pool create .rgw.root 32
ceph osd pool create zone02.rgw.control 32 32
ceph osd pool create zone02.rgw.data.root 32 32
ceph osd pool create zone02.rgw.gc 32 32
ceph osd pool create zone02.rgw.log 32 32
ceph osd pool create zone02.rgw.intent-log 32 32
ceph osd pool create zone02.rgw.usage 32 32
ceph osd pool create zone02.rgw.users.keys 32 32
ceph osd pool create zone02.rgw.users.email 32 32
ceph osd pool create zone02.rgw.users.swift 32 32
ceph osd pool create zone02.rgw.users.uid 32 32
ceph osd pool create zone02.rgw.buckets.index 32 32
ceph osd pool create zone02.rgw.buckets.data 32 32
ceph osd pool create zone02.rgw.meta 32 32
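Before continuing, it is worth checking that the pools exist (the pg_num of 32 used above suits a small test cluster; size it appropriately for production):
ceph osd pool ls | grep rgw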
Create the realm, zonegroup, and master zone
# create realm, zonegroup and zone
radosgw-admin realm create --rgw-realm=cn --default
radosgw-admin zonegroup create --rgw-zonegroup=bj --endpoints=http://<self-ip>:80 --rgw-realm=cn --master --default
radosgw-admin zone create --rgw-zonegroup=bj --rgw-zone=bj-zone02 --endpoints=http://<self-ip>:80 --default --master
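To verify that these were created with the intended endpoints, each can be dumped as JSON; a quick check using the names from this example:
radosgw-admin realm get --rgw-realm=cn
radosgw-admin zonegroup get --rgw-zonegroup=bj
radosgw-admin zone get --rgw-zone=bj-zone02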
Remove the default zonegroup and zone, and update the period
# remove the default zonegroup and zone (this step may not be needed on every cluster)
radosgw-admin zonegroup remove --rgw-zonegroup=default --rgw-zone=default
radosgw-admin period update --commit
radosgw-admin zone delete --rgw-zone=default
radosgw-admin period update --commit
radosgw-admin zonegroup delete --rgw-zonegroup=default
radosgw-admin period update --commit
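After the commits, the current period should no longer reference the default zonegroup or zone; a quick check:
radosgw-admin period get
radosgw-admin zonegroup list   # 'default' should be gone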
Create the system user needed for synchronization
radosgw-admin user create --uid=zone.user --display-name="Zone User" --system
radosgw-admin zone modify --rgw-zone=bj-zone02 --access-key={system-key} --secret={secret}
radosgw-admin period update --commit
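The {system-key} and {secret} placeholders are the access and secret keys printed by the user create command; a minimal sketch of scripting this step, assuming jq is available:
# pull the system user's keys out of its JSON description
ACCESS_KEY=$(radosgw-admin user info --uid=zone.user | jq -r '.keys[0].access_key')
SECRET_KEY=$(radosgw-admin user info --uid=zone.user | jq -r '.keys[0].secret_key')
radosgw-admin zone modify --rgw-zone=bj-zone02 --access-key="$ACCESS_KEY" --secret="$SECRET_KEY"
radosgw-admin period update --commit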
Edit ceph.conf and start radosgw
vim /etc/ceph/ceph.conf
[client.rgw.ceph]
host = ceph
rgw_frontends = "civetweb port=80"
rgw_zone=bj-zone02
service radosgw start id=rgw.ceph
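Once radosgw is up, the endpoint can be checked with a plain HTTP request; an anonymous GET should return an S3 ListAllMyBucketsResult XML document:
# replace <self-ip> with the endpoint configured above
curl http://<self-ip>:80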
Configure the secondary zone:
Create the pools
#!/bin/bash
ceph osd pool create .rgw.root 32
ceph osd pool create zone01.rgw.control 32 32
ceph osd pool create zone01.rgw.data.root 32 32
ceph osd pool create zone01.rgw.gc 32 32
ceph osd pool create zone01.rgw.log 32 32
ceph osd pool create zone01.rgw.intent-log 32 32
ceph osd pool create zone01.rgw.usage 32 32
ceph osd pool create zone01.rgw.users.keys 32 32
ceph osd pool create zone01.rgw.users.email 32 32
ceph osd pool create zone01.rgw.users.swift 32 32
ceph osd pool create zone01.rgw.users.uid 32 32
ceph osd pool create zone01.rgw.buckets.index 32 32
ceph osd pool create zone01.rgw.buckets.data 32 32
ceph osd pool create zone01.rgw.meta 32 32
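The same pools can also be created with a short loop instead of one command per pool; a minimal sketch assuming the zone01 prefix and pg counts used above:
#!/bin/bash
ceph osd pool create .rgw.root 32
for p in control data.root gc log intent-log usage users.keys users.email users.swift users.uid buckets.index buckets.data meta; do
    ceph osd pool create "zone01.rgw.${p}" 32 32
done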
Pull the realm, zonegroup, and period information from the master zone
radosgw-admin realm pull --url=http://<master-zone-ip>:80 --access-key={system-key} --secret={secret}
radosgw-admin realm default --rgw-realm=cn
radosgw-admin period pull --url=http://<master-zone-ip>:80 --access-key={system-key} --secret={secret}
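If the pull succeeded, the realm and zonegroup from the master zone should now be visible on this cluster:
radosgw-admin realm list       # should show 'cn'
radosgw-admin zonegroup list   # should show 'bj'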
Create the secondary zone
radosgw-admin zone create --rgw-zonegroup=bj --rgw-zone=bj-zone01 --endpoints=http://<self-ip>:80 --access-key={system-key} --secret={secret}
radosgw-admin zone delete --rgw-zone=default
radosgw-admin period update --commit
Edit ceph.conf and start radosgw
vim /etc/ceph/ceph.conf
[client.rgw.ceph0]
host = ceph0
rgw_frontends = "civetweb port=80"
rgw_zone=bj-zone01
service radosgw start id=rgw.ceph0
Check the cluster status
Check on the master zone node
root@ceph:~/rgw# radosgw-admin sync status
2016-10-26 11:18:45.124701 7fd18c502900 0 error in read_id for id : (2) No such file or directory
2016-10-26 11:18:45.125156 7fd18c502900 0 error in read_id for id : (2) No such file or directory
          realm 0b64b20e-2a90-4fc4-a1d6-57fc674589sdf64 (cn)
      zonegroup 1bfc8ccd-01ae-477e-a332-af4cfsd0d3f20 (bj)
           zone 9f621425-cd68-4d2f-b3e7-e8581sdfaef2c (bj-zone01)
  metadata sync no sync (zone is master)
      data sync source: 249b96bd-8f86-4326-80e0-7fce78sddec1 (bj-zone02)
                        syncing
                        full sync: 0/128 shards
                        incremental sync: 128/128 shards
                        data is caught up with source
root@ceph:~/rgw#
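Beyond sync status, a simple end-to-end check is to write an object through the master zone and read it back through the secondary; a sketch using s3cmd, assuming ~/.s3cfg carries the zone.user keys and host_base/host_bucket point at the respective endpoint:
# against the master zone endpoint
s3cmd mb s3://sync-test
echo hello > /tmp/obj.txt
s3cmd put /tmp/obj.txt s3://sync-test/
# against the secondary zone endpoint, after a short delay the object should be listed
s3cmd ls s3://sync-test/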