Course video: https://coding.imooc.com/class/189.html

Single host: Bridge Network, Host Network, None
Multi-host: Overlay Network

Packet capture tool

https://www.wireshark.org

Creating two hosts with Vagrant

yujiangdeMBP-13:docker-network yujiang$ cat Vagrantfile 
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.require_version ">= 1.6.0"
boxes = [
{
:name => "docker-node1",
:eth1 => "192.168.56.61",
:mem => "1024",
:cpu => "1"
},
{
:name => "docker-node2",
:eth1 => "192.168.56.62",
:mem => "1024",
:cpu => "1"
},
]

Vagrant.configure(2) do |config|
config.vm.box = "centos/centos7"
boxes.each do |opts|
config.vm.define opts[:name] do |config|
config.vm.hostname = opts[:name]
config.vm.provider "vmware_fusion" do |v|
v.vmx["memsize"] = opts[:mem]
v.vmx["numvcpus"] = opts[:cpu]
end

config.vm.provider "virtualbox" do |v|
v.customize ["modifyvm", :id, "--memory", opts[:mem]]
v.customize ["modifyvm", :id, "--cpus", opts[:cpu]]
end
config.vm.network :private_network, ip:opts[:eth1]
end
end
config.vm.provision "shell", inline: <<-SHELL
sudo yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-selinux docker-engine-selinux docker-engine
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install docker-ce -y
sudo systemctl start docker
sudo systemctl enable docker
sudo groupadd docker
sudo gpasswd -a vagrant docker
SHELL

# Known issue: mount: unknown filesystem type 'vboxsf'
#config.vm.synced_folder "./share", "/Users/yujiang/Vagrant/docker-network"
#config.vm.provision "shell", privileged: true, path: "./setup.sh"
end

Linux network namespaces

http://cizixs.com/2017/02/10/network-virtualization-network-namespace/

https://tonybai.com/2017/01/11/understanding-linux-network-namespace-for-docker-network/

[vagrant@docker-node1 ~]$ docker run -d --name test1 busybox /bin/sh -c "while true;do sleep 3600; done"
[vagrant@docker-node1 ~]$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
f9a344634aab busybox "/bin/sh -c 'while t…" 11 seconds ago Up 10 seconds test1

View network namespaces on the host

[vagrant@docker-node1 ~]$ sudo ip netns list
[vagrant@docker-node1 ~]$ sudo ip netns add test1
[vagrant@docker-node1 ~]$ sudo ip netns add test2
[vagrant@docker-node1 ~]$ sudo ip netns list
test2
test1
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip a
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip link set dev lo up
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever

[vagrant@docker-node1 ~]$ sudo ip link add veth-test1 type veth peer name veth-test2
[vagrant@docker-node1 ~]$ ip link
5: veth-test2@veth-test1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 96:92:53:64:58:17 brd ff:ff:ff:ff:ff:ff
6: veth-test1@veth-test2: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 3e:20:9c:b8:9c:2b brd ff:ff:ff:ff:ff:ff

[vagrant@docker-node1 ~]$ sudo ip link set veth-test1 netns test1
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip link
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
6: veth-test1@if5: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 3e:20:9c:b8:9c:2b brd ff:ff:ff:ff:ff:ff link-netnsid 0
[vagrant@docker-node1 ~]$ ip link
5: veth-test2@if6: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 96:92:53:64:58:17 brd ff:ff:ff:ff:ff:ff link-netnsid 0

[vagrant@docker-node1 ~]$ sudo ip link set veth-test2 netns test2
[vagrant@docker-node1 ~]$ ip link
5: veth-test2@if6 is now gone from the host as well
[vagrant@docker-node1 ~]$ sudo ip netns exec test2 ip link
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
5: veth-test2@if6: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 96:92:53:64:58:17 brd ff:ff:ff:ff:ff:ff link-netnsid 0

Assign IP addresses
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip addr add 192.168.1.2/24 dev veth-test1
[vagrant@docker-node1 ~]$ sudo ip netns exec test2 ip addr add 192.168.1.3/24 dev veth-test2
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip link set dev veth-test1 up
[vagrant@docker-node1 ~]$ sudo ip netns exec test2 ip link set dev veth-test2 up
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
6: veth-test1@if5: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether 3e:20:9c:b8:9c:2b brd ff:ff:ff:ff:ff:ff link-netnsid 1
inet 192.168.1.2/24 scope global veth-test1
valid_lft forever preferred_lft forever
inet6 fe80::3c20:9cff:feb8:9c2b/64 scope link
valid_lft forever preferred_lft forever
[vagrant@docker-node1 ~]$ sudo ip netns exec test2 ip a
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
5: veth-test2@if6: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether 96:92:53:64:58:17 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 192.168.1.3/24 scope global veth-test2
valid_lft forever preferred_lft forever
inet6 fe80::9492:53ff:fe64:5817/64 scope link
valid_lft forever preferred_lft forever

Ping each other across namespaces
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ping 192.168.1.3
PING 192.168.1.3 (192.168.1.3) 56(84) bytes of data.
64 bytes from 192.168.1.3: icmp_seq=1 ttl=64 time=0.051 ms
[vagrant@docker-node1 ~]$ sudo ip netns exec test2 ping 192.168.1.2
PING 192.168.1.2 (192.168.1.2) 56(84) bytes of data.
64 bytes from 192.168.1.2: icmp_seq=1 ttl=64 time=0.035 ms

Bridge0

[vagrant@docker-node1 ~]$ docker network ls
NETWORK ID NAME DRIVER SCOPE
36c9fe545daf bridge bridge local
39385556d8cd host host local
7828c2433efd none null local
[vagrant@docker-node1 ~]$ docker network inspect 36c9fe545daf
[
{
"Name": "bridge",
"Id": "36c9fe545daf2ab0917fce9b1a8edee5ebdd4cb375b1083439212506f3fe179c",
"Created": "2018-12-15T19:24:36.985544361Z",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.17.0.0/16",
"Gateway": "172.17.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": { # test1这个container连到了bridge这个网络
"f6c6d9b55defc79cf5a39feff1dde1da336f3aa29e68a7577428aefab0196f6b": {
"Name": "test1",
"EndpointID": "12a9fa5786c42c486e46bb058e44eb811f8d293ca4a89e1203df134c393d0254",
"MacAddress": "02:42:ac:11:00:02",
"IPv4Address": "172.17.0.2/16",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.bridge.default_bridge": "true",
"com.docker.network.bridge.enable_icc": "true",
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "docker0",
"com.docker.network.driver.mtu": "1500"
},
"Labels": {}
}
]
[vagrant@docker-node1 ~]$ ip a
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:42:8b:4b:10 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:42ff:fe8b:4b10/64 scope link
valid_lft forever preferred_lft forever
8: veth2657408@if7: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
link/ether a2:0b:d6:4a:77:e5 brd ff:ff:ff:ff:ff:ff link-netnsid 2
inet6 fe80::a00b:d6ff:fe4a:77e5/64 scope link
valid_lft forever preferred_lft forever
[vagrant@docker-node1 ~]$ docker exec test1 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
7: eth0@if8: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever
veth2657408 is attached to docker0. The test1 container has an eth0 interface, and the container's eth0 and the host's veth2657408 are the two ends of a single veth pair.
[vagrant@docker-node1 ~]$ sudo yum install bridge-utils -y
[vagrant@docker-node1 ~]$ brctl show
bridge name bridge id STP enabled interfaces
docker0 8000.0242428b4b10 no veth2657408
Containers on the same host communicate with each other through docker0, and docker0 uses NAT so that containers can reach the Internet.
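To see roughly how this is wired up on the host, two quick checks (a sketch, assuming the default iptables-based setup on this CentOS 7 VM):

# the MASQUERADE rule docker adds for the default bridge subnet
sudo iptables -t nat -nL POSTROUTING | grep 172.17
# which veth interfaces are currently attached to docker0
brctl show docker0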

Creating a custom bridge

[vagrant@docker-node1 ~]$ docker network create -d bridge my-bridge
7cbd844d6e4cf262eba12277a3e8b4c475a1fb6aa73b9ea312d113d6c519284a
[vagrant@docker-node1 ~]$ docker network ls
NETWORK ID NAME DRIVER SCOPE
36c9fe545daf bridge bridge local
39385556d8cd host host local
7cbd844d6e4c my-bridge bridge local
7828c2433efd none null local
[vagrant@docker-node1 ~]$ brctl show
bridge name bridge id STP enabled interfaces
br-7cbd844d6e4c 8000.02425dd71cb3 no
docker0 8000.0242428b4b10 no veth2657408
veth7687c87
[vagrant@docker-node1 ~]$ docker run -d --name test3 --network my-bridge busybox /bin/sh -c "while true; do sleep 3600; done"
[vagrant@docker-node1 ~]$ brctl show
bridge name bridge id STP enabled interfaces
br-7cbd844d6e4c 8000.02425dd71cb3 no vetha530f6b
docker0 8000.0242428b4b10 no veth2657408
veth7687c87
[vagrant@docker-node1 ~]$ docker network ls
NETWORK ID NAME DRIVER SCOPE
36c9fe545daf bridge bridge local
39385556d8cd host host local
7cbd844d6e4c my-bridge bridge local
7828c2433efd none null local
[vagrant@docker-node1 ~]$ docker network inspect 7cbd844d6e4c
[
{
"Name": "my-bridge",
"Id": "7cbd844d6e4cf262eba12277a3e8b4c475a1fb6aa73b9ea312d113d6c519284a",
"Created": "2018-12-16T13:17:25.765613424Z",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "172.18.0.0/16",
"Gateway": "172.18.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"5adef860247e056b8f8d2fab89892e10573daff5ea598482f53cefe3152700e0": {
"Name": "test3",
"EndpointID": "19f5d575ad3ff8602c7342f036f024e88c4b97c33a68efa206bd810128b82ddc",
"MacAddress": "02:42:ac:12:00:02",
"IPv4Address": "172.18.0.2/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]
Attach test2 to my-bridge as well
[vagrant@docker-node1 ~]$ docker network connect my-bridge test2
[vagrant@docker-node1 ~]$ docker network inspect 7cbd844d6e4c
[
{
"Name": "my-bridge",
"Id": "7cbd844d6e4cf262eba12277a3e8b4c475a1fb6aa73b9ea312d113d6c519284a",
"Created": "2018-12-16T13:17:25.765613424Z",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "172.18.0.0/16",
"Gateway": "172.18.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"4b7db28e2a457ea6102fd8f06b512041e502dcf54c6d7c362fc5e39f7a0cdf23": {
"Name": "test2",
"EndpointID": "cf4a07fff8a8684cd179ae747630ceeef32ebd38b2fb5eb9db4d2d35759970fb",
"MacAddress": "02:42:ac:12:00:03",
"IPv4Address": "172.18.0.3/16",
"IPv6Address": ""
},
"5adef860247e056b8f8d2fab89892e10573daff5ea598482f53cefe3152700e0": {
"Name": "test3",
"EndpointID": "19f5d575ad3ff8602c7342f036f024e88c4b97c33a68efa206bd810128b82ddc",
"MacAddress": "02:42:ac:12:00:02",
"IPv4Address": "172.18.0.2/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]
Inspect the default bridge: test2 shows up there too, so the test2 container is attached both to the default bridge and to my-bridge.
[vagrant@docker-node1 ~]$ docker network ls
NETWORK ID NAME DRIVER SCOPE
36c9fe545daf bridge bridge local
39385556d8cd host host local
7cbd844d6e4c my-bridge bridge local
7828c2433efd none null local
[vagrant@docker-node1 ~]$ docker network inspect 36c9fe545daf
[
{
"Name": "bridge",
"Id": "36c9fe545daf2ab0917fce9b1a8edee5ebdd4cb375b1083439212506f3fe179c",
"Created": "2018-12-15T19:24:36.985544361Z",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.17.0.0/16",
"Gateway": "172.17.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"4b7db28e2a457ea6102fd8f06b512041e502dcf54c6d7c362fc5e39f7a0cdf23": {
"Name": "test2",
"EndpointID": "4ba2bbe1ee63a797b528e25533fd09dda8921cf5f688d4a5a24db4efed7410e2",
"MacAddress": "02:42:ac:11:00:03",
"IPv4Address": "172.17.0.3/16",
"IPv6Address": ""
},
"f6c6d9b55defc79cf5a39feff1dde1da336f3aa29e68a7577428aefab0196f6b": {
"Name": "test1",
"EndpointID": "12a9fa5786c42c486e46bb058e44eb811f8d293ca4a89e1203df134c393d0254",
"MacAddress": "02:42:ac:11:00:02",
"IPv4Address": "172.17.0.2/16",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.bridge.default_bridge": "true",
"com.docker.network.bridge.enable_icc": "true",
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "docker0",
"com.docker.network.driver.mtu": "1500"
},
"Labels": {}
}
]
[vagrant@docker-node1 ~]$ docker exec test2 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
9: eth0@if10: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:11:00:03 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.3/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever
14: eth1@if15: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:12:00:03 brd ff:ff:ff:ff:ff:ff
inet 172.18.0.3/16 brd 172.18.255.255 scope global eth1
valid_lft forever preferred_lft forever

[vagrant@docker-node1 ~]$ docker exec -it test3 /bin/sh
/ # ping 172.18.0.3
PING 172.18.0.3 (172.18.0.3): 56 data bytes
64 bytes from 172.18.0.3: seq=0 ttl=64 time=0.727 ms
/ # ping test2
PING test2 (172.18.0.3): 56 data bytes
64 bytes from 172.18.0.3: seq=0 ttl=64 time=0.058 ms
When containers are attached to a user-created bridge, they are effectively linked by default, so pinging by container name also works. This is not the case for containers attached to the default docker0 bridge.
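For completeness, a small sketch of the reverse operations (hypothetical follow-up, not from the course transcript): detach a container from the custom bridge and remove the bridge once nothing uses it.

docker network disconnect my-bridge test2   # detach test2 from my-bridge
docker network rm my-bridge                 # only succeeds once no container is attached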

Linking containers with --link

[vagrant@docker-node1 ~]$ docker run -d --name test1 busybox /bin/sh -c "while true;do sleep 3600; done"
[vagrant@docker-node1 ~]$ sudo docker run -d --name test2 --link test1 busybox /bin/sh -c "while true; do sleep 3600; done"

[vagrant@docker-node1 ~]$ docker exec test1 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
7: eth0@if8: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever
[vagrant@docker-node1 ~]$ docker exec test2 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
9: eth0@if10: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:11:00:03 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.3/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever

[vagrant@docker-node1 ~]$ docker exec -it test2 /bin/sh
/ # ping 172.17.0.2
PING 172.17.0.2 (172.17.0.2): 56 data bytes
64 bytes from 172.17.0.2: seq=0 ttl=64 time=0.419 ms
/ # ping test1 (pinging by container name also works)
PING test1 (172.17.0.2): 56 data bytes
64 bytes from 172.17.0.2: seq=0 ttl=64 time=0.082 ms

Docker port mapping

docker run --name nginx -d -p <host port>:<container port> nginx
[vagrant@docker-node1 ~]$ docker run --name nginx -d -p 80:80 nginx
[vagrant@docker-node1 ~]$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
82d01fa9a547 nginx "nginx -g 'daemon of…" 14 seconds ago Up 13 seconds 0.0.0.0:80->80/tcp nginx
[vagrant@docker-node1 ~]$ curl 127.0.0.1
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
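Two quick ways to inspect the published port above (a sketch, assuming the default iptables setup):

docker port nginx                 # prints something like: 80/tcp -> 0.0.0.0:80
sudo iptables -t nat -nL DOCKER   # the DNAT rule that forwards host port 80 to the container's 172.17.0.x:80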

Host and None networks

None: the container gets no network interface apart from loopback, so it can only be accessed with docker exec.

[vagrant@docker-node1 ~]$ docker run -d --name test1 --network none busybox /bin/sh -c "while true;do sleep 3600;done"
005cd306dc9e51a50a9bff5a2f2a9c54297c2dfe786443787d54df1d0a10e8bb
[vagrant@docker-node1 ~]$ docker network inspect none
[
{
"Name": "none",
"Id": "7828c2433efde412a4e8d4ffa9a92e121fb6391a37687fda9e2b09f8304ff12b",
"Created": "2018-12-15T18:17:04.614441655Z",
"Scope": "local",
"Driver": "null",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": []
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"005cd306dc9e51a50a9bff5a2f2a9c54297c2dfe786443787d54df1d0a10e8bb": {
"Name": "test1",
"EndpointID": "46a077f2a809d0073a6c9251897e4dd1e47dd785c6f13f32056dfb13e32f0ded",
"MacAddress": "",
"IPv4Address": "",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]
[vagrant@docker-node1 ~]$ docker exec test1 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever

Host: the container shares the host's network namespace, so its IP addresses are the same as the host's.

[vagrant@docker-node1 ~]$ docker run -d --name test1 --network host busybox /bin/sh -c "while true;do sleep 3600;done"
0bf2514b4ce329a56491409d6470b0bc8374824be805bec6c5e387ecbb10713f
[vagrant@docker-node1 ~]$ docker network inspect host
[
{
"Name": "host",
"Id": "39385556d8cda0864b8911f06c7c0b79d1858f2d16b6b31fd34e5e87cb8ac08b",
"Created": "2018-12-15T18:17:04.625179187Z",
"Scope": "local",
"Driver": "host",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": []
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"0bf2514b4ce329a56491409d6470b0bc8374824be805bec6c5e387ecbb10713f": {
"Name": "test1",
"EndpointID": "adfa701db6a153c3fafd4b9fb4d10b35b47872afd9f7595f0b5ee7b9089d59df",
"MacAddress": "",
"IPv4Address": "",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]
[vagrant@docker-node1 ~]$ docker exec test1 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
link/ether 52:54:00:47:2c:0d brd ff:ff:ff:ff:ff:ff
inet 10.0.2.15/24 brd 10.0.2.255 scope global dynamic eth0
valid_lft 84405sec preferred_lft 84405sec
inet6 fe80::5054:ff:fe47:2c0d/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue
link/ether 02:42:20:4d:38:8a brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:20ff:fe4d:388a/64 scope link
valid_lft forever preferred_lft forever
4: br-7cbd844d6e4c: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue
link/ether 02:42:be:6a:02:b8 brd ff:ff:ff:ff:ff:ff
inet 172.18.0.1/16 brd 172.18.255.255 scope global br-7cbd844d6e4c
valid_lft forever preferred_lft forever
5: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
link/ether 08:00:27:2a:89:41 brd ff:ff:ff:ff:ff:ff
inet 192.168.56.61/24 brd 192.168.56.255 scope global eth1
valid_lft forever preferred_lft forever
inet6 fe80::a00:27ff:fe2a:8941/64 scope link
valid_lft forever preferred_lft forever
7: vethd963769@if6: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue master docker0
link/ether ee:f6:51:e6:91:e3 brd ff:ff:ff:ff:ff:ff
inet6 fe80::ecf6:51ff:fee6:91e3/64 scope link
valid_lft forever preferred_lft forever

Multi-host communication: Overlay

Reference: https://github.com/docker/labs/blob/master/networking/concepts/06-overlay-networks.md

Overlay
+
VXLAN (see https://cizixs.com/2017/09/25/vxlan-protocol-introduction/)
+
Tunneling (Ethernet frames encapsulated in IPv4/UDP)

etcd is used to keep a shared record of the containers on each host so that no duplicate IPs are allocated across machines.

Set up an etcd cluster
[vagrant@docker-node1 ~]$ sudo yum install wget -y && wget https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz && tar zxvf etcd-v3.3.10-linux-amd64.tar.gz && cd etcd-v3.3.10-linux-amd64/
[vagrant@docker-node2 ~]$ sudo yum install wget -y && wget https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz && tar zxvf etcd-v3.3.10-linux-amd64.tar.gz && cd etcd-v3.3.10-linux-amd64/

[vagrant@docker-node1 etcd-v3.3.10-linux-amd64]$ nohup ./etcd --name docker-node1 \
--initial-advertise-peer-urls http://192.168.56.61:2380 \
--listen-peer-urls http://192.168.56.61:2380 \
--listen-client-urls http://192.168.56.61:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://192.168.56.61:2379 \
--initial-cluster-token etcd-cluster \
--initial-cluster docker-node1=http://192.168.56.61:2380,docker-node2=http://192.168.56.62:2380 \
--initial-cluster-state new&
[vagrant@docker-node2 etcd-v3.3.10-linux-amd64]$ nohup ./etcd --name docker-node2 \
--initial-advertise-peer-urls http://192.168.56.62:2380 \
--listen-peer-urls http://192.168.56.62:2380 \
--listen-client-urls http://192.168.56.62:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://192.168.56.62:2379 \
--initial-cluster-token etcd-cluster \
--initial-cluster docker-node1=http://192.168.56.61:2380,docker-node2=http://192.168.56.62:2380 \
--initial-cluster-state new&

[vagrant@docker-node1 etcd-v3.3.10-linux-amd64]$ ./etcdctl cluster-health
member 14192bed1b668a6 is healthy: got healthy result from http://192.168.56.61:2379
member 80c395b734da48f6 is healthy: got healthy result from http://192.168.56.62:2379
cluster is healthy
[vagrant@docker-node2 etcd-v3.3.10-linux-amd64]$ ./etcdctl cluster-health
member 14192bed1b668a6 is healthy: got healthy result from http://192.168.56.61:2379
member 80c395b734da48f6 is healthy: got healthy result from http://192.168.56.62:2379
cluster is healthy

[vagrant@docker-node1 etcd-v3.3.10-linux-amd64]$ sudo service docker stop
[vagrant@docker-node1 etcd-v3.3.10-linux-amd64]$ sudo /usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --cluster-store=etcd://192.168.56.61:2379 --cluster-advertise=192.168.56.61:2375&
[vagrant@docker-node1 etcd-v3.3.10-linux-amd64]$ sudo docker version

[vagrant@docker-node2 etcd-v3.3.10-linux-amd64]$ sudo service docker stop
[vagrant@docker-node2 etcd-v3.3.10-linux-amd64]$ sudo /usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --cluster-store=etcd://192.168.56.62:2379 --cluster-advertise=192.168.56.62:2375&
[vagrant@docker-node2 etcd-v3.3.10-linux-amd64]$ sudo docker version

[vagrant@docker-node1 ~]$ sudo docker network ls
NETWORK ID NAME DRIVER SCOPE
6a9dcf7a7d0a bridge bridge local
39385556d8cd host host local
7828c2433efd none null local
[vagrant@docker-node2 ~]$ sudo docker network ls
NETWORK ID NAME DRIVER SCOPE
e93dec4d4089 bridge bridge local
199f9936292d host host local
4a55973792c6 none null local
Create an overlay network named demo on docker-node1. It automatically shows up on docker-node2 as well; this synchronization is what etcd provides.
[vagrant@docker-node1 ~]$ sudo docker network create -d overlay demo
d0cf99b69499e384b52854e852b0c734ebae3f039fdacd762e5347de18454fda
[vagrant@docker-node1 ~]$ sudo docker network ls
NETWORK ID NAME DRIVER SCOPE
6a9dcf7a7d0a bridge bridge local
d0cf99b69499 demo overlay global
39385556d8cd host host local
7828c2433efd none null local
[vagrant@docker-node2 ~]$ sudo docker network ls
NETWORK ID NAME DRIVER SCOPE
e93dec4d4089 bridge bridge local
d0cf99b69499 demo overlay global
199f9936292d host host local
4a55973792c6 none null local
[vagrant@docker-node1 etcd-v3.3.10-linux-amd64]$ ./etcdctl ls /docker/network/v1.0/network
/docker/network/v1.0/network/d0cf99b69499e384b52854e852b0c734ebae3f039fdacd762e5347de18454fda
[vagrant@docker-node1 ~]$ docker network inspect demo
[
{
"Name": "demo",
"Id": "d0cf99b69499e384b52854e852b0c734ebae3f039fdacd762e5347de18454fda",
"Created": "2018-12-17T15:00:39.903457425Z",
"Scope": "global",
"Driver": "overlay",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "10.0.0.0/24",
"Gateway": "10.0.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {}
}
]

Create containers to test. Creating a container with the same name on a different host fails: when test1 is created on the second machine, the name is looked up in etcd and found to be taken.
[vagrant@docker-node1 ~]$ sudo docker run -d --name test1 --net demo busybox sh -c "while true; do sleep 3600; done"
fabfd6aab82b692d0d59ba5ca61d0315fced0fa459510ebe1d17fe8eacd71882
[vagrant@docker-node2 ~]$ sudo docker run -d --name test1 --net demo busybox sh -c "while true; do sleep 3600; done"
docker: Error response from daemon: Conflict. The container name "/test1" is already in use by container "74d40e80850ca8b95f6c43b7c78f755811e71892116428b1108a1f62553dbf7f". You have to remove (or rename) that container to be able to reuse that name.
See 'docker run --help'.
[vagrant@docker-node2 ~]$ sudo docker run -d --name test2 --net demo busybox sh -c "while true; do sleep 3600; done"
09ab0bd6131b7bebbadf7183a4ab6179fd4fea2fe1322c38437e2043bf7b0b43
Inspect the network again
[vagrant@docker-node1 ~]$ docker network inspect demo
[
{
"Name": "demo",
"Id": "d0cf99b69499e384b52854e852b0c734ebae3f039fdacd762e5347de18454fda",
"Created": "2018-12-17T15:00:39.903457425Z",
"Scope": "global",
"Driver": "overlay",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "10.0.0.0/24",
"Gateway": "10.0.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"ep-596086a2ecffec95d85529df2e24016ac62c8448466f60454e61567482faf02f": {
"Name": "test2",
"EndpointID": "596086a2ecffec95d85529df2e24016ac62c8448466f60454e61567482faf02f",
"MacAddress": "02:42:0a:00:00:03",
"IPv4Address": "10.0.0.3/24",
"IPv6Address": ""
},
"fabfd6aab82b692d0d59ba5ca61d0315fced0fa459510ebe1d17fe8eacd71882": {
"Name": "test1",
"EndpointID": "36027fb8100351e24db8e5691a70420413a5e02d6f397af3794cb2eabdc71fb8",
"MacAddress": "02:42:0a:00:00:02",
"IPv4Address": "10.0.0.2/24",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]
[vagrant@docker-node1 ~]$ docker exec test1 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
12: eth0@if13: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
link/ether 02:42:0a:00:00:02 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.2/24 brd 10.0.0.255 scope global eth0
valid_lft forever preferred_lft forever
15: eth1@if16: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:13:00:02 brd ff:ff:ff:ff:ff:ff
inet 172.19.0.2/16 brd 172.19.255.255 scope global eth1
valid_lft forever preferred_lft forever
[vagrant@docker-node2 ~]$ docker exec test2 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
7: eth0@if8: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
link/ether 02:42:0a:00:00:03 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.3/24 brd 10.0.0.255 scope global eth0
valid_lft forever preferred_lft forever
10: eth1@if11: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff
inet 172.18.0.2/16 brd 172.18.255.255 scope global eth1
valid_lft forever preferred_lft forever
Each container now has two interfaces. docker network ls also shows an extra docker_gwbridge network; one of the container's interfaces is attached to docker_gwbridge, the other to the overlay.
[vagrant@docker-node1 ~]$ docker network ls
NETWORK ID NAME DRIVER SCOPE
d0cf99b69499 demo overlay global
c43108631ed7 docker_gwbridge bridge local

[vagrant@docker-node1 ~]$ docker exec test1 ping 10.0.0.3
PING 10.0.0.3 (10.0.0.3): 56 data bytes
64 bytes from 10.0.0.3: seq=0 ttl=64 time=10.259 ms
[vagrant@docker-node1 ~]$ docker exec test1 ping test2
PING test2 (10.0.0.3): 56 data bytes
64 bytes from 10.0.0.3: seq=0 ttl=64 time=5.188 ms

[vagrant@docker-node2 ~]$ docker exec test2 ping 10.0.0.2
PING 10.0.0.2 (10.0.0.2): 56 data bytes
64 bytes from 10.0.0.2: seq=0 ttl=64 time=8.995 ms
[vagrant@docker-node2 ~]$ docker exec test2 ping test1
PING test1 (10.0.0.2): 56 data bytes
64 bytes from 10.0.0.2: seq=0 ttl=64 time=5.485 ms
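To see the VXLAN encapsulation mentioned above on the wire, a rough check (assuming the overlay data path uses the default VXLAN UDP port 4789 over the 192.168.56.x interface eth1):

sudo yum install -y tcpdump
sudo tcpdump -i eth1 -nn udp port 4789   # then run "docker exec test1 ping test2" in another shell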

Course video: https://coding.imooc.com/class/189.html

Building a base image from scratch

1. Write a demo program

[vagrant@localhost ~]$ sudo yum install glibc-static gcc -y

[vagrant@localhost ~]$ cat helloworld.c
#include <stdio.h>

int main() {
printf("hello world!\n");
return 0;
}

# Compiling without -static makes docker run fail with:
# standard_init_linux.go:190: exec user process caused "no such file or directory"
# because a dynamically linked binary needs the dynamic loader and shared libc, and the scratch image contains neither.
[vagrant@localhost ~]$ gcc -static helloworld.c -o helloworld

2. Write the Dockerfile

[vagrant@localhost ~]$ cat Dockerfile 
FROM scratch
ADD helloworld /
CMD ["/helloworld"]

3. Build the image

[vagrant@localhost ~]$ docker build -t yujiang/helloworld .
Sending build context to Docker daemon 19.97kB
Step 1/3 : FROM scratch
--->
Step 2/3 : ADD helloworld /
---> 2949199fbdb8
Step 3/3 : CMD ["/helloworld"]
---> Running in 3af2e910629e
Removing intermediate container 3af2e910629e
---> b898a6498b21
Successfully built b898a6498b21
Successfully tagged yujiang/helloworld:latest

[vagrant@localhost ~]$ docker image ls
REPOSITORY TAG IMAGE ID CREATED SIZE
yujiang/helloworld latest b898a6498b21 21 seconds ago 857kB

View the image layers
[vagrant@localhost ~]$ docker history yujiang/helloworld
IMAGE CREATED CREATED BY SIZE COMMENT
b898a6498b21 About a minute ago /bin/sh -c #(nop) CMD ["/helloworld"] 0B
2949199fbdb8 About a minute ago /bin/sh -c #(nop) ADD file:5e0b91d4866514aa0… 857kB

[vagrant@localhost ~]$ ll -h
total 848K
-rw-rw-r--. 1 vagrant vagrant 50 Dec 13 16:41 Dockerfile
-rwxrwxr-x. 1 vagrant vagrant 837K Dec 13 16:47 helloworld
-rw-rw-r--. 1 vagrant vagrant 79 Dec 13 16:11 helloworld.c

[vagrant@localhost ~]$ docker run yujiang/helloworld
hello world!

Building an image with docker commit

When a container already exists and you have changed something inside it (for example installed a package), you can commit the modified container as a new image with:

docker container commit

1. Start a container and install vim

[vagrant@localhost ~]$ docker run -it centos
[root@b2985a1be234 /]# yum install vim -y
[root@b2985a1be234 /]# exit
[vagrant@localhost ~]$ docker container ls -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
b2985a1be234 centos "/bin/bash" 3 minutes ago Exited (0) About a minute ago nervous_haslett

2. Create the image with docker commit

[vagrant@localhost ~]$ docker commit b2985a1be234 yujiang/centos-vim
sha256:d0a8856e664eb754f9854c0c27a92c7d01623d2087a77269fbd12ba5021e6e13
[vagrant@localhost ~]$ docker image ls
REPOSITORY TAG IMAGE ID CREATED SIZE
yujiang/centos-vim latest d0a8856e664e 59 seconds ago 327MB
centos latest 1e1148e4cc2c 7 days ago 202MB

[vagrant@localhost ~]$ docker history 1e1148e4cc2c
IMAGE CREATED CREATED BY SIZE COMMENT
1e1148e4cc2c 7 days ago /bin/sh -c #(nop) CMD ["/bin/bash"] 0B
<missing> 7 days ago /bin/sh -c #(nop) LABEL org.label-schema.sc… 0B
<missing> 7 days ago /bin/sh -c #(nop) ADD file:6f877549795f4798a… 202MB
[vagrant@localhost ~]$ docker history d0a8856e664e
IMAGE CREATED CREATED BY SIZE COMMENT
d0a8856e664e About a minute ago /bin/bash 126MB
1e1148e4cc2c 7 days ago /bin/sh -c #(nop) CMD ["/bin/bash"] 0B
<missing> 7 days ago /bin/sh -c #(nop) LABEL org.label-schema.sc… 0B
<missing> 7 days ago /bin/sh -c #(nop) ADD file:6f877549795f4798a… 202MB

Publishing an image built this way is not safe, because other people cannot tell what changes were made inside it. It is not recommended.

Building an image with docker build

1. Create a centos-vim directory

[vagrant@localhost ~]$ mkdir centos-vim && cd centos-vim

2. Write the Dockerfile

[vagrant@localhost centos-vim]$ vim Dockerfile
FROM centos
RUN yum install vim -y

3. Build the image with docker build

[vagrant@localhost centos-vim]$ docker build -t yujiang/centos-vim .
Sending build context to Docker daemon 2.048kB
Step 1/2 : FROM centos
---> 1e1148e4cc2c
Step 2/2 : RUN yum install vim -y
---> Running in 8122262ff02a

...安装过程省略...

Complete!
Removing intermediate container 8122262ff02a
---> e00635baf672
Successfully built e00635baf672
Successfully tagged yujiang/centos-vim:latest



Step 1/2 reuses the existing centos image layer (1e1148e4cc2c) directly.
Step 2/2 starts a temporary intermediate container (8122262ff02a), installs vim with yum inside it, removes the intermediate container when done (Removing intermediate container 8122262ff02a), and commits the result as a new image layer. If the build fails at a step, you can debug by starting a container from the image produced by the last successful step (docker run -it <image id> /bin/bash).

[vagrant@localhost centos-vim]$ docker image ls
REPOSITORY TAG IMAGE ID CREATED SIZE
yujiang/centos-vim latest e00635baf672 6 minutes ago 327MB
centos latest 1e1148e4cc2c 7 days ago 202MB

docker build is the recommended way to build images.

Dockerfile syntax

Official documentation: https://docs.docker.com/engine/reference/builder/

FROM

FROM scratch        # build a base image
FROM centos         # build on top of a base image
FROM ubuntu:14.04
Prefer official images as the base image whenever possible.

LABEL

LABEL maintainer="lnsyyj@xxx.com"
LABEL version="1.0"
LABEL description="This is a description"
LABEL defines image metadata.

RUN

RUN yum update && yum install -y vim \
    python-devel                       # backslash to continue the line
RUN apt-get update && apt-get install -y perl \
    pwgen --no-install-recommends && rm -rf \
    /var/lib/apt/lists/*               # remember to clean the cache
RUN /bin/bash -c 'source $HOME/.bashrc; echo $HOME'
For readability, break complex RUN commands with backslashes, and merge related commands into a single RUN to avoid useless layers.

WORKDIR

WORKDIR /root

WORKDIR /test   # the /test directory is created automatically if it does not exist
WORKDIR demo
RUN pwd         # prints /test/demo
Use WORKDIR instead of RUN cd, and prefer absolute paths.

ADD and COPY

ADD hello /

ADD test.tar.gz /   # copied to / and unpacked

WORKDIR /root
ADD hello test/     # ends up as /root/test/hello

WORKDIR /root
COPY hello test/
In most cases COPY is preferable to ADD. ADD does everything COPY does plus extraction of local archives. For remote files or directories, download them with curl or wget inside the image instead.

ENV

ENV MYSQL_VERSION 5.6	# define a constant
RUN apt-get install -y mysql-server="${MYSQL_VERSION}" \
    && rm -rf /var/lib/apt/lists/*   # reference the constant
Use ENV wherever possible to improve maintainability.

VOLUME and EXPOSE

(Storage and networking; covered in their own sections later.)

CMD and ENTRYPOINT

Covered separately below.

RUN vs CMD vs ENTRYPOINT

RUN: executes a command and creates a new image layer.
CMD: sets the default command and arguments executed when the container starts.
ENTRYPOINT: sets the command that is run when the container starts.

Shell form and Exec form

Shell form

RUN apt-get install -y vim
CMD echo "hello docker"
ENTRYPOINT echo "hello docker"

Exec form

RUN [ "apt-get", "install", "-y", "vim" ]
CMD [ "/bin/echo", "hello docker" ]
ENTRYPOINT [ "/bin/echo", "hello docker" ]

DEMO

Dockerfile1:
FROM centos
ENV name Docker
ENTRYPOINT echo "hello $name"

Dockerfile2:
FROM centos
ENV name Docker
ENTRYPOINT [ "/bin/echo", "hello $name" ]

Dockerfile2 prints the literal string "hello $name"; it does not expand the variable and print "hello Docker" the way Dockerfile1 does. To make the exec form behave like Dockerfile1, see Dockerfile3:

Dockerfile3:
FROM centos
ENV name Docker
ENTRYPOINT [ "/bin/bash", "-c", "echo hello $name" ] # with -c, the rest of the command must be passed as a single quoted argument

CMD

  • The default command executed when the container starts

  • If docker run specifies another command, CMD is ignored

  • If multiple CMD instructions are defined, only the last one takes effect

FROM centos
ENV name Docker
CMD echo "hello $name"

What does docker run [image] print? "hello Docker".
What does docker run -it [image] /bin/bash print? Nothing: /bin/bash overrides the CMD.

ENTRYPOINT

  • Runs the container as an application or service

  • It is not ignored; it is always executed

  • Best practice: write a shell script and use it as the entrypoint (a sketch of such a script follows the example below)

COPY docker-entrypoint.sh /usr/local/bin/
ENTRYPOINT [ "docker-entrypoint.sh" ]

EXPOSE 27017
CMD [ "mongod" ]

Storing images

Public images

https://hub.docker.com/

[vagrant@localhost ~]$ docker build -t lnsyyj/helloworld .
[vagrant@localhost ~]$ docker images
lnsyyj/helloworld latest 1a8620e6d6de 46 hours ago 857kB

[vagrant@localhost ~]$ docker login
Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.
Username: lnsyyj
Password:
Login Succeeded

[vagrant@localhost ~]$ docker push lnsyyj/helloworld:latest
The push refers to repository [docker.io/lnsyyj/helloworld]
a9094ec14918: Pushed
latest: digest: sha256:dd740db962a1e3a8fb74461505f539248b7c88de80b133db612c22e80d7b2d17 size: 527

Delete the local image, then test pulling it back
[vagrant@localhost ~]$ docker rmi lnsyyj/helloworld
[vagrant@localhost ~]$ docker pull lnsyyj/helloworld
  • You can also link your Docker Hub account to GitHub: if a repository contains a Dockerfile, Docker Hub will build the image automatically.

Private images

Setting up a docker registry

https://hub.docker.com/_/registry

[vagrant@localhost ~]$ docker run -d -p 5000:5000 --restart always --name registry registry:2
Unable to find image 'registry:2' locally
2: Pulling from library/registry
d6a5679aa3cf: Pull complete
ad0eac849f8f: Pull complete
2261ba058a15: Pull complete
f296fda86f10: Pull complete
bcd4a541795b: Pull complete
Digest: sha256:5a156ff125e5a12ac7fdec2b90b7e2ae5120fa249cf62248337b6d04abc574c8
Status: Downloaded newer image for registry:2
2707d472d3dba19f366c7ca51e621b83a63975492152880e01268a326d34bf50

[vagrant@localhost ~]$ docker build -t 10.0.2.15:5000/helloworld .
Sending build context to Docker daemon 873.5kB
Step 1/3 : FROM scratch
--->
Step 2/3 : ADD helloworld /
---> Using cache
---> 1b468168e95e
Step 3/3 : CMD ["/helloworld"]
---> Using cache
---> 1a8620e6d6de
Successfully built 1a8620e6d6de
Successfully tagged 10.0.2.15:5000/helloworld:latest

[vagrant@localhost ~]$ cat /etc/docker/daemon.json
{ "insecure-registries": ["10.0.2.15:5000"] }

[vagrant@localhost ~]$ sudo vim /lib/systemd/system/docker.service
Add the line:
EnvironmentFile=-/etc/docker/daemon.json
[vagrant@localhost ~]$ sudo systemctl daemon-reload
[vagrant@localhost ~]$ service docker restart

Push to the private registry
[vagrant@localhost ~]$ docker images
10.0.2.15:5000/helloworld latest 1a8620e6d6de 47 hours ago 857kB
[vagrant@localhost ~]$ curl http://10.0.2.15:5000/v2/_catalog
{"repositories":[]}
[vagrant@localhost ~]$ docker push 10.0.2.15:5000/helloworld
The push refers to repository [10.0.2.15:5000/helloworld]
a9094ec14918: Pushed
latest: digest: sha256:dd740db962a1e3a8fb74461505f539248b7c88de80b133db612c22e80d7b2d17 size: 527
[vagrant@localhost ~]$ curl http://10.0.2.15:5000/v2/_catalog
{"repositories":["helloworld"]}
Verify
[vagrant@localhost ~]$ docker rmi 10.0.2.15:5000/helloworld
[vagrant@localhost ~]$ docker pull 10.0.2.15:5000/helloworld
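Besides _catalog, the registry's v2 API can also list the tags of a repository; a quick check (the sample output is what I would expect, not captured from the course environment):

curl http://10.0.2.15:5000/v2/helloworld/tags/list
# {"name":"helloworld","tags":["latest"]}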

Common container commands

docker exec: run a command inside a running container.

docker exec -it <container ID> /bin/bash

docker stop: stop a running container.

docker stop <container ID>

docker inspect: show detailed information about a container.

docker inspect <container ID>

docker logs: show a container's logs.

docker logs <container ID>
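A few more everyday commands worth knowing (a sketch; these are standard docker CLI commands, not part of the course transcript):

docker logs -f <container ID>                  # follow the log output
docker stats                                   # live CPU/memory usage of running containers
docker rm $(docker ps -aq -f status=exited)    # remove all exited containers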

stress, a Linux load-testing tool

Building a command-line tool image with docker build

[vagrant@localhost ~]$ mkdir stress && cd stress/
[vagrant@localhost stress]$ cat Dockerfile
FROM ubuntu
RUN apt-get update && apt-get install -y stress
ENTRYPOINT ["/usr/bin/stress"]
CMD []
[vagrant@localhost stress]$ docker build -t lnsyyj/ubuntu-stress .

[vagrant@localhost stress]$ docker run -it lnsyyj/ubuntu-stress --vm 1 --verbose
stress: info: [1] dispatching hogs: 0 cpu, 0 io, 1 vm, 0 hdd
stress: dbug: [1] using backoff sleep of 3000us
stress: dbug: [1] --> hogvm worker 1 [6] forked
stress: dbug: [6] allocating 268435456 bytes ...
stress: dbug: [6] touching bytes in strides of 4096 bytes ...
stress: dbug: [6] freed 268435456 bytes
stress: dbug: [6] allocating 268435456 bytes ...
stress: dbug: [6] touching bytes in strides of 4096 bytes ...
stress: dbug: [6] freed 268435456 bytes

--vm 1 --verbose is received through the CMD [] in the Dockerfile and appended to the ENTRYPOINT. The ENTRYPOINT + CMD combination is a very popular pattern.

Limiting a container's resources

[vagrant@localhost stress]$ docker run --memory=200M  -it lnsyyj/ubuntu-stress --vm 1 --vm-bytes 500M --verbose
stress: info: [1] dispatching hogs: 0 cpu, 0 io, 1 vm, 0 hdd
stress: dbug: [1] using backoff sleep of 3000us
stress: dbug: [1] --> hogvm worker 1 [6] forked
stress: dbug: [6] allocating 524288000 bytes ...
stress: dbug: [6] touching bytes in strides of 4096 bytes ...
stress: FAIL: [1] (415) <-- worker 6 got signal 9
stress: WARN: [1] (417) now reaping child worker processes
stress: FAIL: [1] (421) kill error: No such process
stress: FAIL: [1] (451) failed run completed in 1s
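CPU can be limited the same way; a sketch using the relative-weight flag (not run in the course notes; the flags are standard docker run options):

# with both containers busy, a --cpu-shares=10 container gets roughly twice
# the CPU time of a --cpu-shares=5 one on the same host
docker run -it --cpu-shares=5 lnsyyj/ubuntu-stress --cpu 1 --verbose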

The kernel features underlying Docker

  • namespaces: isolation of pid, net, ipc, mnt, uts (see the sketch after this list)

  • control groups (cgroups): resource limits

  • union file systems: the layering of containers and images
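A small sketch to make the namespace and cgroup isolation visible on this VM (assumes a running container named test1 and the default cgroupfs layout of CentOS 7):

PID=$(docker inspect --format '{{.State.Pid}}' test1)       # host PID of the container's init process
sudo ls -l /proc/$PID/ns                                     # its ipc/mnt/net/pid/uts namespaces differ from /proc/1/ns
cat /sys/fs/cgroup/memory/docker/*/memory.limit_in_bytes     # memory limits docker set via cgroups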

Course video: https://coding.imooc.com/class/189.html

Installing Docker on CentOS 7

https://docs.docker.com/install/linux/docker-ce/centos/

Create a docker user and install dependencies

[root@docker ~]# adduser docker
[root@docker ~]# hostnamectl set-hostname docker
[root@docker ~]# vi /etc/sudoers
Add:
docker ALL=(ALL) ALL
[root@docker ~]# su - docker
Last login: Wed Dec 12 10:26:46 EST 2018 on pts/0
[docker@docker ~]$
[docker@docker ~]$ sudo yum install epel-release vim -y

Install Docker CE (community edition)

Uninstall any previously installed Docker packages
sudo yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-selinux \
docker-engine-selinux \
docker-engine

Install the required packages
sudo yum install -y yum-utils \
device-mapper-persistent-data \
lvm2

Add the Docker repository
sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo

Install Docker
[docker@docker ~]$ sudo yum install docker-ce -y

Start the Docker daemon and enable it at boot
[docker@docker ~]$ sudo systemctl start docker
[docker@docker ~]$ sudo systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.

Test the installation
[docker@docker ~]$ sudo docker run hello-world

Create VMs with Vagrant and install Docker inside

1. Install VirtualBox, then install Vagrant

2. Download a Vagrant box

yujiangdeMBP-13:centos7 yujiang$ ls ~/.vagrant.d/downloadboxes/centos7/
virtualbox.box

3. Add the Vagrant box to the local box list

yujiangdeMBP-13:centos7 yujiang$ vagrant box add centos/centos7 ~/.vagrant.d/downloadboxes/centos7/virtualbox.box 
==> box: Box file was not detected as metadata. Adding it directly...
==> box: Adding box 'centos/centos7' (v0) for provider:
box: Unpacking necessary files from: file:///Users/yujiang/.vagrant.d/downloadboxes/centos7/virtualbox.box
==> box: Successfully added box 'centos/centos7' (v0) for 'virtualbox'!

yujiangdeMBP-13:centos7 yujiang$ vagrant box list
centos/centos7 (virtualbox, 0)

4. Create a directory for the Vagrant VM and start it

yujiangdeMBP-13:~ yujiang$ mkdir Vagrant && cd Vagrant/
yujiangdeMBP-13:Vagrant yujiang$ vagrant init centos/centos7
yujiangdeMBP-13:Vagrant yujiang$ vagrant up
yujiangdeMBP-13:Vagrant yujiang$ vagrant ssh
[vagrant@localhost ~]$ exit
yujiangdeMBP-13:Vagrant yujiang$ vagrant destroy

5. Then install Docker following "Installing Docker on CentOS 7" above

Edit the Vagrantfile so that Docker is installed automatically when the VM boots

# -*- mode: ruby -*-
Vagrant.configure("2") do |config|
config.vm.box = "centos/centos7"
config.vm.provision "shell", inline: <<-SHELL
sudo yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-selinux docker-engine-selinux docker-engine
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install docker-ce -y
sudo systemctl start docker
sudo systemctl enable docker
sudo groupadd docker
sudo gpasswd -a vagrant docker
SHELL
end

yujiangdeMBP-13:Vagrant yujiang$ vagrant up
yujiangdeMBP-13:Vagrant yujiang$ vagrant ssh
[vagrant@localhost ~]$ sudo docker version
Client:
Version: 18.09.0
API version: 1.39
Go version: go1.10.4
Git commit: 4d60db4
Built: Wed Nov 7 00:48:22 2018
OS/Arch: linux/amd64
Experimental: false

Server: Docker Engine - Community
Engine:
Version: 18.09.0
API version: 1.39 (minimum version 1.12)
Go version: go1.10.4
Git commit: 4d60db4
Built: Wed Nov 7 00:19:08 2018
OS/Arch: linux/amd64
Experimental: false

Running docker commands without sudo

1. Add a docker group

[vagrant@localhost ~]$ sudo groupadd docker
groupadd: group 'docker' already exists

2. Add the current user to the docker group

[vagrant@localhost ~]$ sudo gpasswd -a vagrant docker
Adding user vagrant to group docker

3. Log out of the current shell and reconnect

[vagrant@localhost ~]$ exit

yujiangdeMBP-13:Vagrant yujiang$ vagrant ssh

[vagrant@localhost ~]$ docker version
Client:
Version: 18.09.0
API version: 1.39
Go version: go1.10.4
Git commit: 4d60db4
Built: Wed Nov 7 00:48:22 2018
OS/Arch: linux/amd64
Experimental: false

Server: Docker Engine - Community
Engine:
Version: 18.09.0
API version: 1.39 (minimum version 1.12)
Go version: go1.10.4
Git commit: 4d60db4
Built: Wed Nov 7 00:19:08 2018
OS/Arch: linux/amd64
Experimental: false

Setting up Jenkins (CentOS 7)

Install Jenkins
sudo yum install wget -y
sudo wget -O /etc/yum.repos.d/jenkins.repo http://pkg.jenkins-ci.org/redhat/jenkins.repo
sudo rpm --import https://jenkins-ci.org/redhat/jenkins-ci.org.key
sudo yum install jenkins java -y

Start Jenkins
sudo systemctl start jenkins
sudo systemctl enable jenkins
sudo systemctl status jenkins

Configure the firewall
sudo firewall-cmd --permanent --new-service=jenkins
sudo firewall-cmd --permanent --service=jenkins --set-short="Jenkins Service Ports"
sudo firewall-cmd --permanent --service=jenkins --set-description="Jenkins service firewalld port exceptions"
sudo firewall-cmd --permanent --service=jenkins --add-port=8080/tcp
sudo firewall-cmd --permanent --add-service=jenkins
sudo firewall-cmd --zone=public --add-service=http --permanent
sudo firewall-cmd --reload
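After the firewall is open, the setup wizard at http://<host>:8080 asks for the initial admin password; on an RPM install it should be readable like this (a sketch, assuming the default package layout):

sudo cat /var/lib/jenkins/secrets/initialAdminPassword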

Setting up GitLab

sudo yum install -y curl policycoreutils-python openssh-server
sudo systemctl enable sshd
sudo systemctl start sshd
sudo firewall-cmd --permanent --add-service=http
sudo firewall-cmd --permanent --add-service=https
sudo systemctl reload firewalld

Install Postfix so notification emails can be sent
sudo yum install postfix
sudo systemctl enable postfix
sudo systemctl start postfix

Install GitLab
wget https://mirror.tuna.tsinghua.edu.cn/gitlab-ce/yum/el7/gitlab-ce-11.5.1-ce.0.el7.x86_64.rpm
rpm -i gitlab-ce-11.5.1-ce.0.el7.x86_64.rpm


gitlab-ctl reconfigure

vi /etc/gitlab/gitlab.rb
unicorn['port'] = 80

gitlab-ctl reconfigure
gitlab-ctl restart
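A rough way to confirm the Omnibus services came back up after the reconfigure/restart (assuming the gitlab-ce package installed above):

sudo gitlab-ctl status        # every service should show a "run:" line
sudo gitlab-ctl tail nginx    # follow one component's logs if something is failing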

Configuring Jenkins + GitLab CI

Deploying Ceph by hand is the key to understanding the official ceph-ansible and ceph-deploy tools: knowing the deployment steps and the components involved makes the code, and the OSD mount flow, much easier to follow, so in my opinion this exercise is worth doing.

Official documentation: http://docs.ceph.com/docs/master/install/manual-deployment/

MANUAL DEPLOYMENT

All Ceph clusters require at least one monitor, and at least as many OSDs as the number of copies of an object stored in the cluster. Bootstrapping the initial monitor(s) is the first step in deploying a Ceph storage cluster. Monitor deployment also sets important criteria for the entire cluster, such as the number of replicas per pool, the number of placement groups (PGs) per OSD, the heartbeat intervals, and whether authentication is required. Most of these values are set by default, so it is useful to know about them when setting up a production cluster.
Following the same layout as the Installation (Quick) guide, we will set up a cluster with node1 as the monitor node and node2 and node3 as OSD nodes.

MONITOR BOOTSTRAPPING

Bootstrapping a monitor requires a number of things:

  • Unique Identifier: the fsid is the unique identifier of the cluster. It is the filesystem identifier from the days when Ceph was primarily a filesystem; now that Ceph also provides native interfaces, block devices and object storage gateways, the name is a bit of a misnomer.

  • Cluster Name: a Ceph cluster has a cluster name, a simple string without spaces. The default cluster name is ceph, but you may specify a different one. Overriding the default name is especially useful when you work with multiple clusters and need to be clear which one you are currently using.

    For example, when running multiple clusters in a multisite configuration, the cluster name (e.g. us-west, us-east) identifies the cluster for the current CLI session. Note: to identify the cluster name on the CLI, specify the Ceph configuration file named after the cluster (e.g. ceph.conf, us-west.conf, us-east.conf). See also the CLI usage (ceph --cluster {cluster-name}).

  • Monitor Name: each monitor instance within a cluster has a unique name. Commonly, the Ceph Monitor name is the hostname (we recommend running one Ceph Monitor per host, and not mixing Ceph OSD Daemons with Ceph Monitors on the same machine). You can retrieve the short hostname with hostname -s.

  • Monitor Map: bootstrapping the initial monitor(s) requires you to generate a monitor map. The monitor map needs the fsid, the cluster name, and at least one hostname and its IP address.

  • Monitor Keyring: monitors communicate with each other via a secret key. You must generate a keyring with a monitor secret and provide it when bootstrapping the initial monitor(s).

  • Administrator Keyring: to use the ceph CLI tools, you must have a client.admin user. So you must generate the admin user and its keyring, and you must also add the client.admin user to the monitor keyring.

The foregoing requirements do not imply creating a Ceph configuration file. However, as a best practice, we recommend creating one and populating it with fsid, mon initial members and mon host.

You can also get and set all monitor settings at runtime. However, the Ceph configuration file may contain only those settings that override the defaults; settings added to the configuration file override the default values. Maintaining those settings in the Ceph configuration file makes the cluster easier to maintain.

The procedure is as follows:

1. Log in to the monitor node:

ssh {hostname}

For example:

ssh node1

2. Make sure you have a directory for the Ceph configuration file. By default Ceph uses /etc/ceph; when you install ceph, the installer creates the /etc/ceph directory automatically.

ls /etc/ceph

Note: deployment tools may remove this directory when purging a cluster (e.g. ceph-deploy purgedata {node-name}, ceph-deploy purge {node-name}).

3. Create a Ceph configuration file. By default Ceph uses ceph.conf, where ceph reflects the cluster name.

sudo vim /etc/ceph/ceph.conf

4. Generate a unique ID (the fsid) for your cluster.

uuidgen

5. Add the unique ID to your Ceph configuration file.

fsid = {UUID}

For example:

fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993

6. Add the initial monitor(s) to your Ceph configuration file.

mon initial members = {hostname}[,{hostname}]

For example:

mon initial members = node1

7. Add the IP address(es) of the initial monitor(s) to the Ceph configuration file and save the file.

mon host = {ip-address}[,{ip-address}]

For example:

mon host = 192.168.0.1

Note: you may use IPv6 addresses instead of IPv4, but you must set ms bind ipv6 to true. See the Network Configuration Reference for details about network configuration.

8. Create a keyring for the cluster and generate a monitor secret key.

ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'

9. Generate an administrator keyring, generate a client.admin user and add the user to the keyring.

sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'

10. Generate a bootstrap-osd keyring, generate a client.bootstrap-osd user and add the user to the keyring.

sudo ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'

11. Add the generated keys to ceph.mon.keyring.

sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring

12. Generate a monitor map using the hostname, host IP address and the fsid. Save it as /tmp/monmap.

monmaptool --create --add {hostname} {ip-address} --fsid {uuid} /tmp/monmap

For example:

monmaptool --create --add node1 192.168.0.1 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap

13. Create the default data directory (or directories) on the monitor host.

sudo mkdir /var/lib/ceph/mon/{cluster-name}-{hostname}

For example:

sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node1

See Monitor Config Reference - Data for details.

14. Populate the monitor daemon(s) with the monitor map and keyring.

sudo -u ceph ceph-mon [--cluster {cluster-name}] --mkfs -i {hostname} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring

For example:

sudo -u ceph ceph-mon --mkfs -i node1 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring

15. Consider settings for your Ceph configuration file. Common settings include the following:

[global]
fsid = {cluster-id}
mon initial members = {hostname}[, {hostname}]
mon host = {ip-address}[, {ip-address}]
public network = {network}[, {network}]
cluster network = {network}[, {network}]
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = {n}
osd pool default size = {n} # Write an object n times.
osd pool default min size = {n} # Allow writing n copies in a degraded state.
osd pool default pg num = {n}
osd pool default pgp num = {n}
osd crush chooseleaf type = {n}

In the above example, the [global] section of the configuration might look like this:

[global]
fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993
mon initial members = node1
mon host = 192.168.0.1
public network = 192.168.0.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1

16. Start the monitor(s).

For most Linux distributions, services are started via systemd:

sudo systemctl start ceph-mon@node1

For older Debian/CentOS/RHEL, use sysvinit:

sudo /etc/init.d/ceph start mon.node1

17. Verify that the monitor is running.

ceph -s

You should see output showing that the monitor you started is up and running, together with a health warning indicating that placement groups (PGs) are inactive. It should look something like this:

cluster:
id: a7f64266-0894-4f1e-a635-d0aeaca0e993
health: HEALTH_OK

services:
mon: 1 daemons, quorum node1
mgr: node1(active)
osd: 0 osds: 0 up, 0 in

data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 bytes
usage: 0 kB used, 0 kB / 0 kB avail
pgs:

Note: once you add OSDs and start them, the placement group (PG) health errors should disappear. See Adding OSDs for details.

MANAGER DAEMON CONFIGURATION

On each node where you run a ceph-mon daemon, you should also set up a ceph-mgr daemon.

See the ceph-mgr administrator guide.

ADDING OSDS

Once the initial monitor(s) are running, you should add OSDs. Your cluster cannot reach an active + clean state until there are enough OSDs to handle the number of copies of an object (e.g. osd pool default size = 2 requires at least two OSDs).

After bootstrapping the monitor, your cluster has a default CRUSH map; however, the CRUSH map does not yet have any Ceph OSD Daemons mapped to a Ceph node.

SHORT FORM

Ceph provides the ceph-volume utility, which can prepare a logical volume, disk or partition for use with Ceph. The ceph-volume utility creates OSD IDs by incrementing an index, and it also adds the new OSD to the CRUSH map under the host. Execute ceph-volume -h for CLI details. The ceph-volume utility automates the steps of the Long Form below. To create the first two OSDs with the short form procedure, execute the following on node2 and node3:

BLUESTORE

1. Create the OSD.

ssh {node-name}
sudo ceph-volume lvm create --data {data-path}

For example:

ssh node1
sudo ceph-volume lvm create --data /dev/hdd1

Alternatively, the creation process can be split into two phases (prepare and activate):

1. Prepare the OSD.

ssh {node-name}
sudo ceph-volume lvm prepare --data {data-path}

For example:

ssh node1
sudo ceph-volume lvm prepare --data /dev/hdd1

Once prepared, the OSD must be activated using its ID and FSID, which can be obtained by listing the OSDs on the current server:

sudo ceph-volume lvm list

2. Activate the OSD.

sudo ceph-volume lvm activate {ID} {FSID}

For example:

sudo ceph-volume lvm activate 0 a7f64266-0894-4f1e-a635-d0aeaca0e993

FILESTORE

1. Create the OSD.

ssh {node-name}
sudo ceph-volume lvm create --filestore --data {data-path} --journal {journal-path}

For example:

ssh node1
sudo ceph-volume lvm create --filestore --data /dev/hdd1 --journal /dev/hdd2

或者,创建过程可以分为两个阶段(prepare和activate):

1、Prepare the OSD。

ssh {node-name}
sudo ceph-volume lvm prepare --filestore --data {data-path} --journal {journal-path}

例如:

ssh node1
sudo ceph-volume lvm prepare --filestore --data /dev/hdd1 --journal /dev/hdd2

prepare完成后,需要使用OSD的ID和FSID来激活(activate)它。这些信息可以通过列出当前服务器中的OSD来获得:

sudo ceph-volume lvm list

2、Activate the OSD。

sudo ceph-volume lvm activate --filestore {ID} {FSID}

例如:

sudo ceph-volume lvm activate --filestore 0 a7f64266-0894-4f1e-a635-d0aeaca0e993

LONG FORM

如果没有任何helper实用程序,请创建一个OSD并使用以下步骤将其添加到集群和CRUSH map中。要使用long form方式创建前两个OSD,请对每个OSD执行以下步骤。

注意:此过程不涉及在dm-crypt之上(使用dm-crypt“lockbox”)的部署。

1、连接到OSD主机并变为root用户。

ssh {node-name}
sudo bash

2、为OSD生成UUID。

UUID=$(uuidgen)

3、为OSD生成cephx key。

OSD_SECRET=$(ceph-authtool --gen-print-key)

4、创建OSD。 请注意,如果您需要重用之前销毁的OSD ID,可以提供OSD ID作为ceph osd new的附加参数。我们假设机器上存在client.bootstrap-osd key。您也可以在拥有该key的其他主机上以client.admin身份执行此命令:

ID=$(echo "{\"cephx_secret\": \"$OSD_SECRET\"}" | \
ceph osd new $UUID -i - \
-n client.bootstrap-osd -k /var/lib/ceph/bootstrap-osd/ceph.keyring)

也可以在JSON中包含crush_device_class属性,以设置不同于默认值的initial class(默认值基于自动检测到的设备类型,为ssd或hdd)。
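
例如,若想把initial class显式设置为ssd,可以在JSON中加入crush_device_class(仅作示意):

ID=$(echo "{\"cephx_secret\": \"$OSD_SECRET\", \"crush_device_class\": \"ssd\"}" | \
   ceph osd new $UUID -i - \
   -n client.bootstrap-osd -k /var/lib/ceph/bootstrap-osd/ceph.keyring)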

5、在新OSD上创建默认目录。

mkdir /var/lib/ceph/osd/ceph-$ID

6、如果OSD使用的drive不是操作系统所在的drive,先在其上创建文件系统,并将其mount到刚刚创建的目录。

mkfs.xfs /dev/{DEV}
mount /dev/{DEV} /var/lib/ceph/osd/ceph-$ID

7、将secret写入OSD keyring文件中。

ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-$ID/keyring \
--name osd.$ID --add-key $OSD_SECRET

8、初始化OSD数据目录。

ceph-osd -i $ID --mkfs --osd-uuid $UUID

9、修复所有权。

chown -R ceph:ceph /var/lib/ceph/osd/ceph-$ID

10、将OSD添加到Ceph后,OSD就在您的配置中。但是,它还没有运行。必须先启动新的OSD才能开始接收数据。

systemd方式:

systemctl enable ceph-osd@$ID
systemctl start ceph-osd@$ID

例如:

systemctl enable ceph-osd@12
systemctl start ceph-osd@12

ADDING MDS

在以下说明中,{id}是任意名称,例如计算机的hostname。

1、创建mds数据目录:

mkdir -p /var/lib/ceph/mds/{cluster-name}-{id}

2、创建一个keyring:

ceph-authtool --create-keyring /var/lib/ceph/mds/{cluster-name}-{id}/keyring --gen-key -n mds.{id}

3、导入keyring并设置caps:

ceph auth add mds.{id} osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/{cluster}-{id}/keyring

4、添加到ceph.conf:

[mds.{id}]
host = {id}

5、以手动方式启动daemon:

ceph-mds --cluster {cluster-name} -i {id} -m {mon-hostname}:{mon-port} [-f]

6、以正确的方式启动daemon(使用ceph.conf entry):

service ceph start

7、如果启动daemon失败并显示以下错误:

mds.-1.0 ERROR: failed to authenticate: (22) Invalid argument

那么,请确认你没有在ceph.conf的global section中设置keyring;如果设置了,请将其移至client section,或者添加仅针对此mds daemon的keyring设置。同时验证mds data目录中的key与ceph auth get mds.{id}命令输出的key一致。
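
例如,可以这样对比两处的key是否一致(以{id}=node1为例,仅作示意):

# mds data目录中的keyring
cat /var/lib/ceph/mds/ceph-node1/keyring
# 集群中记录的mds.node1的key,二者应一致
ceph auth get mds.node1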

8、现在您已准备好创建Ceph filesystem

SUMMARY

一旦您的monitor和两个OSD启动并运行,可以通过执行以下操作来观察placement groups peer:

ceph -w

要查看tree,请执行以下操作:

ceph osd tree

你应该看到像这样的输出:

# id    weight  type name       up/down reweight
-1 2 root default
-2 2 host node1
0 1 osd.0 up 1
-3 1 host node2
1 1 osd.1 up 1

要添加(或删除)其他monitors,请参阅Add/Remove Monitors。要添加(或删除)其他Ceph OSD Daemons,请参阅Add/Remove OSDs

部署实验

monitor部署实验

1、添加ceph源

[root@cephlm ~]# vi /etc/yum.repos.d/ceph.repo 
[ceph]
name=Ceph
baseurl=http://mirrors.163.com/ceph/rpm-luminous/el7/x86_64/
# baseurl=https://download.ceph.com/rpm-luminous/el7/x86_64/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.163.com/ceph/keys/release.asc
# gpgkey=https://download.ceph.com/keys/release.asc

2、安装epel与ceph

[1]epel参考:https://blog.csdn.net/yasi_xi/article/details/11746255
EPEL的全称叫 Extra Packages for Enterprise Linux 。EPEL是由 Fedora 社区打造,为 RHEL 及衍生发行版如 CentOS、Scientific Linux 等提供高质量软件包的项目。装上了 EPEL之后,就相当于添加了一个第三方源。

[root@cephlm ~]# yum install epel-release -y && yum install ceph -y

3、为集群创建keyring并生成monitor secret key。

[root@cephlm ~]# ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
creating /tmp/ceph.mon.keyring
[root@cephlm ~]# cat /tmp/ceph.mon.keyring
[mon.]
key = AQAgbv1bc62FBBAAvuCz2a5EDtbAmy9ep1Dxxw==
caps mon = "allow *"

4、生成administrator keyring,生成client.admin用户并将用户添加到keyring。

[root@cephlm ~]# sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
creating /etc/ceph/ceph.client.admin.keyring
[root@cephlm ~]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
key = AQBFbv1bImaJCxAAYUiUCuia//zZSMIPyOHJuA==
caps mds = "allow *"
caps mgr = "allow *"
caps mon = "allow *"
caps osd = "allow *"

5、生成bootstrap-osd keyring,生成client.bootstrap-osd用户并将用户添加到keyring。

[root@cephlm ~]# sudo ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'
creating /var/lib/ceph/bootstrap-osd/ceph.keyring
[root@cephlm ~]# cat /var/lib/ceph/bootstrap-osd/ceph.keyring
[client.bootstrap-osd]
key = AQBQbv1bXv0WAhAAo/hv7OOaftMHOovHNeyOFg==
caps mon = "profile bootstrap-osd"

6、将生成的keys添加到ceph.mon.keyring。

[root@cephlm ~]# sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
[root@cephlm ~]# sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring

[root@cephlm ~]# cat /tmp/ceph.mon.keyring
[mon.]
key = AQAgbv1bc62FBBAAvuCz2a5EDtbAmy9ep1Dxxw==
caps mon = "allow *"
[client.admin]
key = AQBFbv1bImaJCxAAYUiUCuia//zZSMIPyOHJuA==
caps mds = "allow *"
caps mgr = "allow *"
caps mon = "allow *"
caps osd = "allow *"
[client.bootstrap-osd]
key = AQBQbv1bXv0WAhAAo/hv7OOaftMHOovHNeyOFg==
caps mon = "profile bootstrap-osd"

7、使用主机名,主机IP地址和FSID生成monitor map。将其另存为/tmp/monmap。

[root@cephlm ~]# monmaptool --create --add cephlm 192.168.0.10 --fsid c8b0b137-1ba7-4c1f-a514-281139c35233 /tmp/monmap
monmaptool: monmap file /tmp/monmap
monmaptool: set fsid to c8b0b137-1ba7-4c1f-a514-281139c35233
monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)

更改权限,使ceph用户有读取该文件的权限
[root@cephlm ~]# chmod +r /tmp/monmap && chmod +r /tmp/ceph.mon.keyring

[root@cephlm ~]# monmaptool --print /tmp/monmap
monmaptool: monmap file /tmp/monmap
epoch 0
fsid c8b0b137-1ba7-4c1f-a514-281139c35233
last_changed 2018-11-28 10:55:11.677288
created 2018-11-28 10:55:11.677288
0: 192.168.56.205:6789/0 mon.cephlm

8、创建配置文件<集群名>.conf

[root@cephlm ~]# vi /etc/ceph/ceph.conf 
[global]
fsid = c8b0b137-1ba7-4c1f-a514-281139c35233
mon initial members = cephlm
mon host = 192.168.0.10
public network = 192.168.0.0/24
cluster network = 192.168.0.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd pool default size = 1
osd pool default min size = 1
osd pool default pg num = 16
osd pool default pgp num = 16

9、使用monitor map和keyring填充monitor daemon(s)。

[root@cephlm ~]# sudo -u ceph ceph-mon --cluster ceph --mkfs -i cephlm --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring

10、启动ceph-mon服务

[root@cephlm ~]# systemctl enable ceph-mon@cephlm
[root@cephlm ~]# systemctl start ceph-mon@cephlm

[root@cephlm ~]# ceph -s
cluster:
id: c8b0b137-1ba7-4c1f-a514-281139c35233
health: HEALTH_OK

services:
mon: 1 daemons, quorum cephlm
mgr: no daemons active
osd: 0 osds: 0 up, 0 in

data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0B
usage: 0B used, 0B / 0B avail
pgs:

osd部署实验

1、为OSD生成UUID。
[root@cephlm ~]# UUID=$(uuidgen)
[root@cephlm ~]# echo ${UUID}
265b6807-fa12-46f7-8e25-ce8b03dc2a2d

2、为OSD生成cephx key。
[root@cephlm ~]# OSD_SECRET=$(ceph-authtool --gen-print-key)
[root@cephlm ~]# echo ${OSD_SECRET}
AQDljwhcB8MxBxAATx/pqYOv0uRhQI9Tey5UeQ==

3、创建OSD,获得OSD的ID。
[root@cephlm ~]# ID=$(echo "{\"cephx_secret\": \"$OSD_SECRET\"}" | \
ceph osd new $UUID -i - \
-n client.bootstrap-osd -k /var/lib/ceph/bootstrap-osd/ceph.keyring)
[root@cephlm ~]# echo ${ID}
0

4、在新OSD上创建默认数据目录。
[root@cephlm ~]# mkdir /var/lib/ceph/osd/ceph-$ID

5、创建文件系统,并将其mount到刚创建的目录。
[root@cephlm ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 253:0 0 20G 0 disk
├─vda1 253:1 0 1G 0 part /boot
├─vda2 253:2 0 4G 0 part [SWAP]
└─vda3 253:3 0 15G 0 part /
vdb 253:16 0 50G 0 disk
vdc 253:32 0 50G 0 disk
vdd 253:48 0 50G 0 disk

[root@cephlm ~]# ll /var/lib/ceph/osd/
总用量 0
drwxr-xr-x 2 root root 6 12月 6 11:03 ceph-0

[root@cephlm ~]# mkfs.xfs /dev/vdb -f
meta-data=/dev/vdb isize=512 agcount=4, agsize=3276800 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=13107200, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=6400, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0

[root@cephlm ~]# mount /dev/vdb /var/lib/ceph/osd/ceph-$ID/
[root@cephlm ~]# df -Th
文件系统 类型 容量 已用 可用 已用% 挂载点
/dev/vda3 xfs 15G 1.6G 14G 11% /
devtmpfs devtmpfs 1.9G 0 1.9G 0% /dev
tmpfs tmpfs 1.9G 0 1.9G 0% /dev/shm
tmpfs tmpfs 1.9G 8.6M 1.9G 1% /run
tmpfs tmpfs 1.9G 0 1.9G 0% /sys/fs/cgroup
/dev/vda1 xfs 1014M 172M 843M 17% /boot
tmpfs tmpfs 379M 0 379M 0% /run/user/0
/dev/vdb xfs 50G 33M 50G 1% /var/lib/ceph/osd/ceph-0

6、将secret写入OSD keyring文件。
[root@cephlm ~]# ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-$ID/keyring --name osd.$ID --add-key $OSD_SECRET
creating /var/lib/ceph/osd/ceph-0/keyring
added entity osd.0 auth auth(auid = 18446744073709551615 key=AQDljwhcB8MxBxAATx/pqYOv0uRhQI9Tey5UeQ== with 0 caps)

7、初始化OSD数据目录
[root@cephlm ~]# ceph-osd -i $ID --mkfs --osd-uuid $UUID
2019-08-02 18:07:34.990087 7f41b6680d80 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway
2019-08-02 18:07:35.061501 7f41b6680d80 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway
2019-08-02 18:07:35.062969 7f41b6680d80 -1 journal do_read_entry(4096): bad header magic
2019-08-02 18:07:35.062993 7f41b6680d80 -1 journal do_read_entry(4096): bad header magic
2019-08-02 18:07:35.063581 7f41b6680d80 -1 read_settings error reading settings: (2) No such file or directory
2019-08-02 18:07:35.146735 7f41b6680d80 -1 created object store /var/lib/ceph/osd/ceph-0 for osd.0 fsid c8b0b137-1ba7-4c1f-a514-281139c35233

8、修复所有权。
[root@cephlm ~]# chown -R ceph:ceph /var/lib/ceph/osd/ceph-$ID

9、启动OSD服务。
[root@cephlm ~]# systemctl enable ceph-osd@$ID
[root@cephlm ~]# systemctl start ceph-osd@$ID
10、创建pool并写入测试数据,查看对象在OSD数据目录中的分布。
[root@sds-ceph-1 ~]# ceph osd pool create rbd 16
[root@sds-ceph-1 ~]# rados bench -p rbd 10 write --no-cleanup

[root@sds-ceph-1 ~]# tree -h /var/lib/ceph/osd/ceph-0/current/
/var/lib/ceph/osd/ceph-0/current/
├── [4.0K] 1.0_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject126__head_314B9110__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject138__head_F0F776D0__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject149__head_9DC23D60__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject154__head_03D0DB50__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject179__head_E33CA580__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject209__head_3BFDF2F0__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject211__head_DECF96B0__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject212__head_5CED1CF0__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject217__head_1319A430__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject220__head_524D4D00__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject229__head_F40C7DE0__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject22__head_1686F940__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject235__head_516A6B20__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject236__head_12CA1390__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject73__head_397FC990__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject77__head_ADAEA100__1
│   └── [ 0] __head_00000000__1
├── [ 6] 1.0_TEMP
├── [4.0K] 1.1_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject102__head_862AA4F1__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject129__head_F5965891__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject151__head_98E52B51__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject184__head_E52D8D41__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject1__head_F5F320A1__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject234__head_00E8BB81__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject244__head_2363F6E1__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject25__head_581DDE31__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject32__head_324147A1__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject35__head_34FABC91__1
│   └── [ 0] __head_00000001__1
├── [ 6] 1.1_TEMP
├── [4.0K] 1.2_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject104__head_413DA2B2__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject117__head_C0124C02__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject125__head_9DBB0642__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject153__head_37933452__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject167__head_0AE2CE12__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject173__head_821A3292__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject175__head_734C3FB2__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject180__head_74A8D2D2__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject195__head_7A482B82__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject214__head_C62A3262__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject227__head_FBA5E8D2__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject230__head_F81DABF2__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject240__head_96D571D2__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject251__head_8C210072__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject28__head_8D1859C2__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject43__head_9D4243F2__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject4__head_6D94E022__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject53__head_F31A17D2__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject74__head_BA8C88D2__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject93__head_26EE91D2__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject97__head_1025B4F2__1
│   └── [ 0] __head_00000002__1
├── [ 6] 1.2_TEMP
├── [4.0K] 1.3_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject114__head_C05EC3E3__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject145__head_BB9B5383__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject187__head_7F2D3B93__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject20__head_FB09C543__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject222__head_CA878AF3__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject223__head_45D47B63__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject249__head_EB2C03F3__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject2__head_E1BEFAD3__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject48__head_C2ABBF03__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject58__head_73A784F3__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject66__head_7D1C7D23__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject68__head_64431003__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject75__head_EFA44023__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject7__head_1EDC2B13__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject83__head_56DE9F23__1
│   └── [ 0] __head_00000003__1
├── [ 6] 1.3_TEMP
├── [4.0K] 1.4_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject0__head_F8BC28D4__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject160__head_866C8F94__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject162__head_02475BD4__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject164__head_26707F24__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject16__head_3B6FF704__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject174__head_ECF6CC74__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject18__head_7CB94CE4__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject202__head_503B6AE4__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject237__head_1AF8F074__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject247__head_A70993B4__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject31__head_09DBA9F4__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject36__head_ABBB7824__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject39__head_E1727904__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject3__head_E0FE6334__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject46__head_9E9B4204__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject51__head_8C1E0DC4__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject56__head_7065C2F4__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject62__head_DB32DD24__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject87__head_BDBE9FC4__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject90__head_A1654F74__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject96__head_FC6F33F4__1
│   └── [ 0] __head_00000004__1
├── [ 6] 1.4_TEMP
├── [4.0K] 1.5_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject118__head_7B842745__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject123__head_5CEFA465__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject183__head_10F85FE5__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject186__head_3000C235__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject191__head_BC532EE5__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject224__head_92A1E3C5__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject29__head_027ADDC5__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject67__head_5BDDDAA5__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject82__head_A64A01D5__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject8__head_F68FCDE5__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject94__head_ECE61315__1
│   └── [ 0] __head_00000005__1
├── [ 6] 1.5_TEMP
├── [4.0K] 1.6_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject121__head_3F39BC76__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject12__head_88A43B86__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject13__head_FD5483D6__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject141__head_CDACF3C6__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject157__head_E56D1786__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject197__head_804A2DC6__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject207__head_A3266F06__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject216__head_DE5D3A76__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject226__head_CAFCCF66__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject44__head_0544C586__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject45__head_65E3FAB6__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject61__head_A2704176__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject72__head_6E3BA9D6__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject80__head_1B1E6F16__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject85__head_A0756156__1
│   └── [ 0] __head_00000006__1
├── [ 6] 1.6_TEMP
├── [4.0K] 1.7_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject100__head_EF2E6867__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject139__head_5E961CE7__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject142__head_01BCFCA7__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject143__head_A3CDD177__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject144__head_74883667__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject158__head_F5DD1EA7__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject165__head_031366F7__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject168__head_9BE47967__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject169__head_E96B49F7__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject182__head_7119D257__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject194__head_D3C65A17__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject250__head_E9F80417__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject41__head_9295C277__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject55__head_70B967B7__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject78__head_367C2607__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject89__head_7CDE0FC7__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject98__head_96668A47__1
│   └── [ 0] __head_00000007__1
├── [ 6] 1.7_TEMP
├── [4.0K] 1.8_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject116__head_F4318178__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject128__head_32ED4D38__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject134__head_486DF128__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject156__head_F2E7C068__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject190__head_5AAF63A8__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject219__head_50799E88__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject225__head_9EB9D0C8__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject242__head_F2EC42B8__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject33__head_64C2C558__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject34__head_65F25F38__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject38__head_D38CB608__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject49__head_FF4A4208__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject50__head_0F3F77A8__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject52__head_0F2A0548__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject69__head_BDEA5548__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject71__head_EA5383B8__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject76__head_02238F98__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject88__head_45AB4318__1
│   └── [ 0] __head_00000008__1
├── [ 6] 1.8_TEMP
├── [4.0K] 1.9_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject110__head_CE2B10A9__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject136__head_04EDA809__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject163__head_37F8AE49__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject177__head_5F771509__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject193__head_23935A99__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject215__head_15DEBEA9__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject21__head_11518C29__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject248__head_FAFE5689__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject27__head_5B7D7369__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject60__head_64B13949__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject70__head_F3A64E09__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject91__head_31511109__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject99__head_8A68E509__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject9__head_FC52C009__1
│   └── [ 0] __head_00000009__1
├── [ 6] 1.9_TEMP
├── [4.0K] 1.a_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject112__head_FF4D01DA__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject113__head_CADF065A__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject119__head_A72F161A__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject122__head_BF4A0A6A__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject124__head_5BC12E2A__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject140__head_37A19CDA__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject14__head_64FC3EDA__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject152__head_14C2CA0A__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject166__head_C47A6B0A__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject171__head_79A2CFEA__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject17__head_2A4A393A__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject185__head_1D2DDDDA__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject188__head_7FBA73AA__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject204__head_AB1FBD8A__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject205__head_B24EE76A__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject206__head_9891B01A__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject208__head_C07981EA__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject210__head_1227C4DA__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject228__head_0B9B39AA__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject238__head_C56F1D5A__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject23__head_4F279EBA__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject26__head_CCB3C59A__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject86__head_5ED418EA__1
│   └── [ 0] __head_0000000A__1
├── [ 6] 1.a_TEMP
├── [4.0K] 1.b_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject103__head_F8EF0F1B__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject107__head_F3693ABB__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject108__head_13D4D0EB__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject109__head_11C8A99B__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject132__head_227AA5FB__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject137__head_5D82744B__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject148__head_989FE31B__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject159__head_422DA2AB__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject178__head_01C4C3CB__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject181__head_0DC3436B__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject213__head_05B8BA6B__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject246__head_2140121B__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject24__head_5B8169AB__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject64__head_CC62D71B__1
│   └── [ 0] __head_0000000B__1
├── [ 6] 1.b_TEMP
├── [4.0K] 1.c_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject101__head_7711D38C__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject11__head_0218B38C__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject127__head_9A51BCDC__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject133__head_7E6B67EC__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject135__head_C67A3E8C__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject150__head_8635CA6C__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject161__head_3A42EA3C__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject170__head_612CFAFC__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject172__head_EB33BB8C__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject196__head_475FC6BC__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject19__head_D21864EC__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject200__head_52F1139C__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject201__head_14AC036C__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject243__head_3E9C237C__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject245__head_DABE713C__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject5__head_7CE3E36C__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject65__head_86389E8C__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject6__head_1FBF833C__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject81__head_05B34FAC__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject92__head_3988A13C__1
│   └── [ 0] __head_0000000C__1
├── [ 6] 1.c_TEMP
├── [4.0K] 1.d_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject111__head_66CE6C0D__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject115__head_71F498AD__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject120__head_1BFD990D__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject130__head_301FC3CD__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject232__head_320DC13D__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject37__head_AC68B41D__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject40__head_423D9AAD__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject47__head_0951C33D__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject84__head_5EB447DD__1
│   ├── [ 24] benchmark\\ulast\\umetadata__head_8E7A861D__1
│   └── [ 0] __head_0000000D__1
├── [ 6] 1.d_TEMP
├── [4.0K] 1.e_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject105__head_C439984E__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject10__head_1BF35FCE__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject131__head_DBAC810E__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject146__head_6153061E__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject176__head_80C2B02E__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject189__head_4F8A59BE__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject199__head_5ACCE8EE__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject203__head_D77F59BE__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject221__head_C3ADCA4E__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject231__head_0C48675E__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject233__head_F01E13EE__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject30__head_9464F9BE__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject54__head_B1E1B44E__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject59__head_CE84DF4E__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject79__head_CAFFBC0E__1
│   └── [ 0] __head_0000000E__1
├── [ 6] 1.e_TEMP
├── [4.0K] 1.f_head
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject106__head_48E9311F__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject147__head_7211B38F__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject155__head_551133AF__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject15__head_58EF207F__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject192__head_9E2A248F__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject198__head_D7D45B1F__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject218__head_F2A93B5F__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject239__head_2FC0444F__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject241__head_286B4E0F__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject42__head_6FF5EA1F__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject57__head_2A0A792F__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject63__head_2685437F__1
│   ├── [4.0M] benchmark\\udata\\usds-ceph-1.novalocal\\u12560\\uobject95__head_1786F82F__1
│   └── [ 0] __head_0000000F__1
├── [ 6] 1.f_TEMP
├── [ 4] commit_op_seq
├── [4.0K] meta
│   ├── [ 802] inc\\uosdmap.1__0_B65F4306__none
│   ├── [ 232] inc\\uosdmap.2__0_B65F40D6__none
│   ├── [ 635] inc\\uosdmap.3__0_B65F4066__none
│   ├── [ 763] inc\\uosdmap.4__0_B65F4136__none
│   ├── [ 406] inc\\uosdmap.5__0_B65F46C6__none
│   ├── [ 454] inc\\uosdmap.6__0_B65F4796__none
│   ├── [ 212] inc\\uosdmap.7__0_B65F4726__none
│   ├── [ 204] inc\\uosdmap.8__0_B65F44F6__none
│   ├── [ 598] osdmap.1__0_FD6E49B1__none
│   ├── [ 869] osdmap.2__0_FD6E4941__none
│   ├── [ 960] osdmap.3__0_FD6E4E11__none
│   ├── [1.1K] osdmap.4__0_FD6E4FA1__none
│   ├── [1.0K] osdmap.5__0_FD6E4F71__none
│   ├── [1.3K] osdmap.6__0_FD6E4C01__none
│   ├── [1.3K] osdmap.7__0_FD6E4DD1__none
│   ├── [1.3K] osdmap.8__0_FD6E4D61__none
│   ├── [ 511] osd\\usuperblock__0_23C2FCDE__none
│   └── [ 0] snapmapper__0_A468EC03__none
├── [ 0] nosnap
└── [ 179] omap
├── [1002] 000007.sst
├── [208K] 000009.log
├── [ 16] CURRENT
├── [ 37] IDENTITY
├── [ 0] LOCK
├── [ 15K] LOG
├── [ 108] MANIFEST-000008
├── [4.1K] OPTIONS-000008
├── [4.1K] OPTIONS-000011
└── [ 37] osd_uuid

34 directories, 299 files

[root@sds-ceph-1 ~]# rados ls -p rbd
benchmark_data_sds-ceph-1.novalocal_12560_object77
benchmark_data_sds-ceph-1.novalocal_12560_object220
benchmark_data_sds-ceph-1.novalocal_12560_object179
benchmark_data_sds-ceph-1.novalocal_12560_object22
benchmark_data_sds-ceph-1.novalocal_12560_object235
benchmark_data_sds-ceph-1.novalocal_12560_object149
benchmark_data_sds-ceph-1.novalocal_12560_object229
benchmark_data_sds-ceph-1.novalocal_12560_object126
benchmark_data_sds-ceph-1.novalocal_12560_object73
benchmark_data_sds-ceph-1.novalocal_12560_object236
benchmark_data_sds-ceph-1.novalocal_12560_object154
benchmark_data_sds-ceph-1.novalocal_12560_object138
benchmark_data_sds-ceph-1.novalocal_12560_object217
benchmark_data_sds-ceph-1.novalocal_12560_object211
benchmark_data_sds-ceph-1.novalocal_12560_object212
benchmark_data_sds-ceph-1.novalocal_12560_object209
benchmark_data_sds-ceph-1.novalocal_12560_object49
benchmark_data_sds-ceph-1.novalocal_12560_object38
benchmark_data_sds-ceph-1.novalocal_12560_object219
benchmark_data_sds-ceph-1.novalocal_12560_object52
benchmark_data_sds-ceph-1.novalocal_12560_object69
benchmark_data_sds-ceph-1.novalocal_12560_object225
benchmark_data_sds-ceph-1.novalocal_12560_object134
benchmark_data_sds-ceph-1.novalocal_12560_object190
benchmark_data_sds-ceph-1.novalocal_12560_object50
benchmark_data_sds-ceph-1.novalocal_12560_object156
benchmark_data_sds-ceph-1.novalocal_12560_object88
benchmark_data_sds-ceph-1.novalocal_12560_object76
benchmark_data_sds-ceph-1.novalocal_12560_object33
benchmark_data_sds-ceph-1.novalocal_12560_object128
benchmark_data_sds-ceph-1.novalocal_12560_object34
benchmark_data_sds-ceph-1.novalocal_12560_object242
benchmark_data_sds-ceph-1.novalocal_12560_object71
benchmark_data_sds-ceph-1.novalocal_12560_object116
benchmark_data_sds-ceph-1.novalocal_12560_object46
benchmark_data_sds-ceph-1.novalocal_12560_object39
benchmark_data_sds-ceph-1.novalocal_12560_object16
benchmark_data_sds-ceph-1.novalocal_12560_object51
benchmark_data_sds-ceph-1.novalocal_12560_object87
benchmark_data_sds-ceph-1.novalocal_12560_object36
benchmark_data_sds-ceph-1.novalocal_12560_object62
benchmark_data_sds-ceph-1.novalocal_12560_object164
benchmark_data_sds-ceph-1.novalocal_12560_object18
benchmark_data_sds-ceph-1.novalocal_12560_object202
benchmark_data_sds-ceph-1.novalocal_12560_object160
benchmark_data_sds-ceph-1.novalocal_12560_object0
benchmark_data_sds-ceph-1.novalocal_12560_object162
benchmark_data_sds-ceph-1.novalocal_12560_object3
benchmark_data_sds-ceph-1.novalocal_12560_object247
benchmark_data_sds-ceph-1.novalocal_12560_object237
benchmark_data_sds-ceph-1.novalocal_12560_object174
benchmark_data_sds-ceph-1.novalocal_12560_object90
benchmark_data_sds-ceph-1.novalocal_12560_object56
benchmark_data_sds-ceph-1.novalocal_12560_object31
benchmark_data_sds-ceph-1.novalocal_12560_object96
benchmark_data_sds-ceph-1.novalocal_12560_object65
benchmark_data_sds-ceph-1.novalocal_12560_object135
benchmark_data_sds-ceph-1.novalocal_12560_object101
benchmark_data_sds-ceph-1.novalocal_12560_object11
benchmark_data_sds-ceph-1.novalocal_12560_object172
benchmark_data_sds-ceph-1.novalocal_12560_object81
benchmark_data_sds-ceph-1.novalocal_12560_object150
benchmark_data_sds-ceph-1.novalocal_12560_object201
benchmark_data_sds-ceph-1.novalocal_12560_object5
benchmark_data_sds-ceph-1.novalocal_12560_object19
benchmark_data_sds-ceph-1.novalocal_12560_object133
benchmark_data_sds-ceph-1.novalocal_12560_object200
benchmark_data_sds-ceph-1.novalocal_12560_object127
benchmark_data_sds-ceph-1.novalocal_12560_object161
benchmark_data_sds-ceph-1.novalocal_12560_object92
benchmark_data_sds-ceph-1.novalocal_12560_object245
benchmark_data_sds-ceph-1.novalocal_12560_object6
benchmark_data_sds-ceph-1.novalocal_12560_object196
benchmark_data_sds-ceph-1.novalocal_12560_object243
benchmark_data_sds-ceph-1.novalocal_12560_object170
benchmark_data_sds-ceph-1.novalocal_12560_object117
benchmark_data_sds-ceph-1.novalocal_12560_object195
benchmark_data_sds-ceph-1.novalocal_12560_object125
benchmark_data_sds-ceph-1.novalocal_12560_object28
benchmark_data_sds-ceph-1.novalocal_12560_object4
benchmark_data_sds-ceph-1.novalocal_12560_object214
benchmark_data_sds-ceph-1.novalocal_12560_object167
benchmark_data_sds-ceph-1.novalocal_12560_object173
benchmark_data_sds-ceph-1.novalocal_12560_object153
benchmark_data_sds-ceph-1.novalocal_12560_object74
benchmark_data_sds-ceph-1.novalocal_12560_object227
benchmark_data_sds-ceph-1.novalocal_12560_object180
benchmark_data_sds-ceph-1.novalocal_12560_object93
benchmark_data_sds-ceph-1.novalocal_12560_object240
benchmark_data_sds-ceph-1.novalocal_12560_object53
benchmark_data_sds-ceph-1.novalocal_12560_object104
benchmark_data_sds-ceph-1.novalocal_12560_object175
benchmark_data_sds-ceph-1.novalocal_12560_object251
benchmark_data_sds-ceph-1.novalocal_12560_object97
benchmark_data_sds-ceph-1.novalocal_12560_object43
benchmark_data_sds-ceph-1.novalocal_12560_object230
benchmark_data_sds-ceph-1.novalocal_12560_object152
benchmark_data_sds-ceph-1.novalocal_12560_object166
benchmark_data_sds-ceph-1.novalocal_12560_object204
benchmark_data_sds-ceph-1.novalocal_12560_object124
benchmark_data_sds-ceph-1.novalocal_12560_object228
benchmark_data_sds-ceph-1.novalocal_12560_object188
benchmark_data_sds-ceph-1.novalocal_12560_object122
benchmark_data_sds-ceph-1.novalocal_12560_object205
benchmark_data_sds-ceph-1.novalocal_12560_object86
benchmark_data_sds-ceph-1.novalocal_12560_object208
benchmark_data_sds-ceph-1.novalocal_12560_object171
benchmark_data_sds-ceph-1.novalocal_12560_object206
benchmark_data_sds-ceph-1.novalocal_12560_object119
benchmark_data_sds-ceph-1.novalocal_12560_object26
benchmark_data_sds-ceph-1.novalocal_12560_object113
benchmark_data_sds-ceph-1.novalocal_12560_object238
benchmark_data_sds-ceph-1.novalocal_12560_object210
benchmark_data_sds-ceph-1.novalocal_12560_object140
benchmark_data_sds-ceph-1.novalocal_12560_object14
benchmark_data_sds-ceph-1.novalocal_12560_object112
benchmark_data_sds-ceph-1.novalocal_12560_object185
benchmark_data_sds-ceph-1.novalocal_12560_object17
benchmark_data_sds-ceph-1.novalocal_12560_object23
benchmark_data_sds-ceph-1.novalocal_12560_object207
benchmark_data_sds-ceph-1.novalocal_12560_object44
benchmark_data_sds-ceph-1.novalocal_12560_object12
benchmark_data_sds-ceph-1.novalocal_12560_object157
benchmark_data_sds-ceph-1.novalocal_12560_object197
benchmark_data_sds-ceph-1.novalocal_12560_object141
benchmark_data_sds-ceph-1.novalocal_12560_object226
benchmark_data_sds-ceph-1.novalocal_12560_object80
benchmark_data_sds-ceph-1.novalocal_12560_object85
benchmark_data_sds-ceph-1.novalocal_12560_object72
benchmark_data_sds-ceph-1.novalocal_12560_object13
benchmark_data_sds-ceph-1.novalocal_12560_object45
benchmark_data_sds-ceph-1.novalocal_12560_object121
benchmark_data_sds-ceph-1.novalocal_12560_object216
benchmark_data_sds-ceph-1.novalocal_12560_object61
benchmark_data_sds-ceph-1.novalocal_12560_object79
benchmark_data_sds-ceph-1.novalocal_12560_object131
benchmark_data_sds-ceph-1.novalocal_12560_object105
benchmark_data_sds-ceph-1.novalocal_12560_object54
benchmark_data_sds-ceph-1.novalocal_12560_object221
benchmark_data_sds-ceph-1.novalocal_12560_object59
benchmark_data_sds-ceph-1.novalocal_12560_object10
benchmark_data_sds-ceph-1.novalocal_12560_object176
benchmark_data_sds-ceph-1.novalocal_12560_object199
benchmark_data_sds-ceph-1.novalocal_12560_object233
benchmark_data_sds-ceph-1.novalocal_12560_object146
benchmark_data_sds-ceph-1.novalocal_12560_object231
benchmark_data_sds-ceph-1.novalocal_12560_object189
benchmark_data_sds-ceph-1.novalocal_12560_object203
benchmark_data_sds-ceph-1.novalocal_12560_object30
benchmark_data_sds-ceph-1.novalocal_12560_object234
benchmark_data_sds-ceph-1.novalocal_12560_object184
benchmark_data_sds-ceph-1.novalocal_12560_object1
benchmark_data_sds-ceph-1.novalocal_12560_object32
benchmark_data_sds-ceph-1.novalocal_12560_object244
benchmark_data_sds-ceph-1.novalocal_12560_object129
benchmark_data_sds-ceph-1.novalocal_12560_object35
benchmark_data_sds-ceph-1.novalocal_12560_object151
benchmark_data_sds-ceph-1.novalocal_12560_object25
benchmark_data_sds-ceph-1.novalocal_12560_object102
benchmark_data_sds-ceph-1.novalocal_12560_object9
benchmark_data_sds-ceph-1.novalocal_12560_object136
benchmark_data_sds-ceph-1.novalocal_12560_object70
benchmark_data_sds-ceph-1.novalocal_12560_object91
benchmark_data_sds-ceph-1.novalocal_12560_object99
benchmark_data_sds-ceph-1.novalocal_12560_object177
benchmark_data_sds-ceph-1.novalocal_12560_object248
benchmark_data_sds-ceph-1.novalocal_12560_object163
benchmark_data_sds-ceph-1.novalocal_12560_object60
benchmark_data_sds-ceph-1.novalocal_12560_object21
benchmark_data_sds-ceph-1.novalocal_12560_object110
benchmark_data_sds-ceph-1.novalocal_12560_object215
benchmark_data_sds-ceph-1.novalocal_12560_object27
benchmark_data_sds-ceph-1.novalocal_12560_object193
benchmark_data_sds-ceph-1.novalocal_12560_object118
benchmark_data_sds-ceph-1.novalocal_12560_object29
benchmark_data_sds-ceph-1.novalocal_12560_object224
benchmark_data_sds-ceph-1.novalocal_12560_object67
benchmark_data_sds-ceph-1.novalocal_12560_object123
benchmark_data_sds-ceph-1.novalocal_12560_object191
benchmark_data_sds-ceph-1.novalocal_12560_object8
benchmark_data_sds-ceph-1.novalocal_12560_object183
benchmark_data_sds-ceph-1.novalocal_12560_object94
benchmark_data_sds-ceph-1.novalocal_12560_object82
benchmark_data_sds-ceph-1.novalocal_12560_object186
benchmark_data_sds-ceph-1.novalocal_12560_object111
benchmark_data_sds-ceph-1.novalocal_12560_object120
benchmark_data_sds-ceph-1.novalocal_12560_object130
benchmark_data_sds-ceph-1.novalocal_12560_object115
benchmark_data_sds-ceph-1.novalocal_12560_object40
benchmark_data_sds-ceph-1.novalocal_12560_object37
benchmark_last_metadata
benchmark_data_sds-ceph-1.novalocal_12560_object84
benchmark_data_sds-ceph-1.novalocal_12560_object232
benchmark_data_sds-ceph-1.novalocal_12560_object47
benchmark_data_sds-ceph-1.novalocal_12560_object68
benchmark_data_sds-ceph-1.novalocal_12560_object48
benchmark_data_sds-ceph-1.novalocal_12560_object145
benchmark_data_sds-ceph-1.novalocal_12560_object20
benchmark_data_sds-ceph-1.novalocal_12560_object75
benchmark_data_sds-ceph-1.novalocal_12560_object66
benchmark_data_sds-ceph-1.novalocal_12560_object83
benchmark_data_sds-ceph-1.novalocal_12560_object223
benchmark_data_sds-ceph-1.novalocal_12560_object114
benchmark_data_sds-ceph-1.novalocal_12560_object7
benchmark_data_sds-ceph-1.novalocal_12560_object187
benchmark_data_sds-ceph-1.novalocal_12560_object2
benchmark_data_sds-ceph-1.novalocal_12560_object58
benchmark_data_sds-ceph-1.novalocal_12560_object222
benchmark_data_sds-ceph-1.novalocal_12560_object249
benchmark_data_sds-ceph-1.novalocal_12560_object137
benchmark_data_sds-ceph-1.novalocal_12560_object178
benchmark_data_sds-ceph-1.novalocal_12560_object159
benchmark_data_sds-ceph-1.novalocal_12560_object24
benchmark_data_sds-ceph-1.novalocal_12560_object213
benchmark_data_sds-ceph-1.novalocal_12560_object181
benchmark_data_sds-ceph-1.novalocal_12560_object108
benchmark_data_sds-ceph-1.novalocal_12560_object246
benchmark_data_sds-ceph-1.novalocal_12560_object148
benchmark_data_sds-ceph-1.novalocal_12560_object64
benchmark_data_sds-ceph-1.novalocal_12560_object103
benchmark_data_sds-ceph-1.novalocal_12560_object109
benchmark_data_sds-ceph-1.novalocal_12560_object107
benchmark_data_sds-ceph-1.novalocal_12560_object132
benchmark_data_sds-ceph-1.novalocal_12560_object78
benchmark_data_sds-ceph-1.novalocal_12560_object98
benchmark_data_sds-ceph-1.novalocal_12560_object89
benchmark_data_sds-ceph-1.novalocal_12560_object142
benchmark_data_sds-ceph-1.novalocal_12560_object158
benchmark_data_sds-ceph-1.novalocal_12560_object100
benchmark_data_sds-ceph-1.novalocal_12560_object144
benchmark_data_sds-ceph-1.novalocal_12560_object168
benchmark_data_sds-ceph-1.novalocal_12560_object139
benchmark_data_sds-ceph-1.novalocal_12560_object250
benchmark_data_sds-ceph-1.novalocal_12560_object194
benchmark_data_sds-ceph-1.novalocal_12560_object182
benchmark_data_sds-ceph-1.novalocal_12560_object55
benchmark_data_sds-ceph-1.novalocal_12560_object41
benchmark_data_sds-ceph-1.novalocal_12560_object143
benchmark_data_sds-ceph-1.novalocal_12560_object165
benchmark_data_sds-ceph-1.novalocal_12560_object169
benchmark_data_sds-ceph-1.novalocal_12560_object241
benchmark_data_sds-ceph-1.novalocal_12560_object192
benchmark_data_sds-ceph-1.novalocal_12560_object147
benchmark_data_sds-ceph-1.novalocal_12560_object239
benchmark_data_sds-ceph-1.novalocal_12560_object95
benchmark_data_sds-ceph-1.novalocal_12560_object57
benchmark_data_sds-ceph-1.novalocal_12560_object155
benchmark_data_sds-ceph-1.novalocal_12560_object42
benchmark_data_sds-ceph-1.novalocal_12560_object106
benchmark_data_sds-ceph-1.novalocal_12560_object198
benchmark_data_sds-ceph-1.novalocal_12560_object218
benchmark_data_sds-ceph-1.novalocal_12560_object15
benchmark_data_sds-ceph-1.novalocal_12560_object63

部署kubernetes有多种方式:

1、不借助任何工具,从基本的CLI操作开始部署k8s集群,可以学习k8s各个组件
https://github.com/kelseyhightower/kubernetes-the-hard-way
2、部署单节点k8s集群,适合快速学习
https://github.com/kubernetes/minikube
3、部署多节点k8s集群
https://github.com/kubernetes/kubeadm
4、在cloud上部署k8s集群
https://github.com/kubernetes/kops
5、coreos的工具,大于十个节点收费,小于等于十个节点免费
https://coreos.com/tectonic/
6、实验环境
https://labs.play-with-k8s.com/

安装minikube

mac

1、安装kubectl,文档https://kubernetes.io/docs/tasks/tools/install-kubectl/
yujiangdeMBP-13:~ yujiang$ brew install kubernetes-cli

2、安装minikube,文档https://github.com/kubernetes/minikube/releases
0.32.0版本在minikube start时遇到Starting cluster components失败,查资料后需要降级到0.25.2。如果想使用新版本,建议另找一台虚拟机试验。
yujiangdeMBP-13:~ yujiang$ curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.25.2/minikube-darwin-amd64 && chmod +x minikube && sudo cp minikube /usr/local/bin/ && rm minikube

如果已经安装了0.32.0版本的minikube,降级方法请看链接:https://coding.m.imooc.com/questiondetail.html?qid=57784

3、安装virtualbox

4、启动minikube单节点k8s(https://www.jianshu.com/p/a7620f73c7f3)
yujiangdeMBP-13:~ yujiang$ minikube start
Starting local Kubernetes v1.9.4 cluster...
Starting VM...
Getting VM IP address...
Moving files into cluster...
Setting up certs...
Connecting to cluster...
Setting up kubeconfig...
Starting cluster components...
Kubectl is now configured to use the cluster.
Loading cached images from config file.

=====================================以下为测试,有坑,不需要执行。
yujiangdeMBP-13:~ yujiang$ rm -rf ~/.minikube/machines/minikube/
yujiangdeMBP-13:~ yujiang$ minikube start
yujiangdeMBP-13:~ yujiang$ minikube start --kubernetes-version v1.8.0 --bootstrapper kubeadm
=====================================以上为测试,有坑,不需要执行。

minikube start命令创建一个名为“minikube”的“kubectl context”。这个context包含与Minikube群集通信的配置。Minikube会自动将此context设置为默认值,但如果您以后需要切换回它,请运行:kubectl config use-context minikube,或者传递每个命令的context,如下所示:kubectl get pods --context=minikube。可以使用不同的context连接不同的k8s集群。
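
例如,切换回minikube context(示意):
yujiangdeMBP-13:~ yujiang$ kubectl config use-context minikube
Switched to context "minikube".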

查看当前config
yujiangdeMBP-13:~ yujiang$ kubectl config view
apiVersion: v1
clusters:
- cluster:
certificate-authority: /Users/yujiang/.minikube/ca.crt
server: https://192.168.99.101:8443
name: minikube
contexts:
- context:
cluster: minikube
user: minikube
name: minikube
current-context: minikube
kind: Config
preferences: {}
users:
- name: minikube
user:
client-certificate: /Users/yujiang/.minikube/client.crt
client-key: /Users/yujiang/.minikube/client.key

查看当前context
yujiangdeMBP-13:~ yujiang$ kubectl config get-contexts
CURRENT NAME CLUSTER AUTHINFO NAMESPACE
* minikube minikube minikube

查看当前k8s集群情况
yujiangdeMBP-13:~ yujiang$ kubectl cluster-info
Kubernetes master is running at https://192.168.99.100:8443

进入minikube虚拟机
yujiangdeMBP-13:~ yujiang$ minikube ssh

设置代理(ss-ng),否则无法pull docker镜像(https://blog.zhesih.com/2018/06/24/k8s-minikube-setup/)
$ sudo vi /etc/systemd/system/docker.service.d/http-proxy.conf
$ sudo systemctl daemon-reload
$ sudo systemctl restart docker
$ systemctl show --property=Environment docker
Environment=DOCKER_RAMDISK=yes HTTP_PROXY=http://192.168.199.165:1087 HTTPS_PROXY=https://192.168.199.165:1087

实验一(kind: Pod)

Pod是k8s调度的最小单位,一个Pod中可以有多个Container
1、创建一个Pod
yujiangdeMBP-13:k8s_yaml yujiang$ cat pod_nginx.yml
apiVersion: v1
kind: Pod
metadata:
name: nginx
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80

yujiangdeMBP-13:k8s_yaml yujiang$ kubectl create -f pod_nginx.yml
pod/nginx created

2、查看Pod
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get pod --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
default nginx 1/1 Running 0 20m
kube-system kube-addon-manager-minikube 1/1 Running 8 60m
kube-system kube-dns-54cccfbdf8-kr847 3/3 Running 12 57m
kube-system kubernetes-dashboard-77d8b98585-vq77b 1/1 Running 4 57m
kube-system storage-provisioner 1/1 Running 5 57m

3、查看Pod的详细信息,可以看到Pod运行在哪台机器上
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx 1/1 Running 0 31m 172.17.0.2 minikube <none> <none>

4、打开dashboard
yujiangdeMBP-13:k8s_yaml yujiang$ minikube dashboard

5、查询nginx IP
$ docker network ls
NETWORK ID NAME DRIVER SCOPE
fb08befd952b bridge bridge local
2de0a434731f host host local
4cd7f4c7083c none null local

$ docker network inspect bridge
[
{
"Name": "bridge",
"Id": "fb08befd952b4448e86e491f271a1e6a50056b5a18bdfb8a3e930edde2ac44da",
"Created": "2018-12-24T17:23:11.618994916Z",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.17.0.0/16",
"Gateway": "172.17.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"25e63a3ff8a091319fdec698f554f1b47c3e019fbdda9b06709d15630003ee6e": {
"Name": "k8s_POD_kube-dns-54cccfbdf8-kr847_kube-system_69d32f1a-0799-11e9-be08-080027bea66e_8",
"EndpointID": "004b9a81ab36cd4e0aba21572b4736b8d85d5541c3533571a67383e5d07e34b6",
"MacAddress": "02:42:ac:11:00:03",
"IPv4Address": "172.17.0.3/16",
"IPv6Address": ""
},
"4138eeb8348781495987d47f6683138515111d2b6afa6fc7352d5f98d4a0858b": {
"Name": "k8s_POD_kubernetes-dashboard-77d8b98585-vq77b_kube-system_69b46ed4-0799-11e9-be08-080027bea66e_8",
"EndpointID": "bb31db755c5a5cd8f36bec35d89c73267d3d283ba8caa691b58ba504b0d6fabc",
"MacAddress": "02:42:ac:11:00:04",
"IPv4Address": "172.17.0.4/16",
"IPv6Address": ""
},
"6874ee85f9257b5b0dfdf808ebf92df9c3c677079a778980956e4655abf23507": {
"Name": "k8s_POD_nginx_default_9e0d9ffc-079e-11e9-be08-080027bea66e_0",
"EndpointID": "040a5616a76665ff9e35bb6cef2388f8db443b2db94b67282a7496b9803acc20",
"MacAddress": "02:42:ac:11:00:02",
"IPv4Address": "172.17.0.2/16",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.bridge.default_bridge": "true",
"com.docker.network.bridge.enable_icc": "true",
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "docker0",
"com.docker.network.driver.mtu": "1500"
},
"Labels": {}
}
]

$ curl 172.17.0.2
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

6. Enter a Container by Pod name (defaults to the first Container). If the Pod has multiple Containers, add the "-c" flag to choose one.
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl exec -it nginx sh

7. Print detailed information about the nginx Pod
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl describe pod nginx

8. How do we expose the nginx port outside the Pod?
There are two ways (the second is sketched right after this item):
(1) port-forward
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl port-forward nginx 8080:80
Open http://127.0.0.1:8080/ in a browser
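(2) expose the Pod through a Service of type NodePort. A minimal sketch, not part of the original session (Experiment 4 applies the same idea to a Deployment):
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl expose pod nginx --type=NodePort --port=80
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get svc nginx
yujiangdeMBP-13:k8s_yaml yujiang$ minikube service nginx --url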

Experiment 2 (kind: ReplicationController)

yujiangdeMBP-13:k8s_yaml yujiang$ cat rc_nginx.yml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx
spec:
  replicas: 3
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl create -f rc_nginx.yml
replicationcontroller/nginx created

yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-prlfw 1/1 Running 0 19s
nginx-szrh8 1/1 Running 0 19s
nginx-z59kd 1/1 Running 0 19s

# Scale the number of replicas
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl scale rc nginx --replicas=2
replicationcontroller/nginx scaled
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-szrh8 1/1 Running 1 21h
nginx-z59kd 1/1 Running 1 21h
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get rc
NAME DESIRED CURRENT READY AGE
nginx 2 2 2 21h
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-szrh8 1/1 Running 1 21h 172.17.0.4 minikube <none> <none>
nginx-z59kd 1/1 Running 1 21h 172.17.0.6 minikube <none> <none>
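
Before moving on to the ReplicaSet experiment, the ReplicationController can be cleaned up (a housekeeping step that is not part of the original session):
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl delete rc nginx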

Experiment 3 (kind: ReplicaSet)

apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: nginx
  labels:
    tier: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      tier: frontend
  template:
    metadata:
      name: nginx
      labels:
        tier: frontend
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80

yujiangdeMBP-13:k8s_yaml yujiang$ kubectl create -f rs_nginx.yml
replicaset.apps/nginx created
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get rs
NAME DESIRED CURRENT READY AGE
nginx 3 3 3 52s
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-brtzk 1/1 Running 0 81s
nginx-m5xgq 1/1 Running 0 81s
nginx-qvdq6 1/1 Running 0 81s
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl scale rs nginx --replicas=2
replicaset.extensions/nginx scaled
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get rs
NAME DESIRED CURRENT READY AGE
nginx 2 2 2 2m32s

Experiment 4 (kind: Deployment)

yujiangdeMBP-13:k8s_yaml yujiang$ cat deployment_nginx.yml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.12.2
        ports:
        - containerPort: 80
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl create -f deployment_nginx.yml
deployment.apps/nginx-deployment created
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 3/3 3 3 5m31s
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get rs
NAME DESIRED CURRENT READY AGE
nginx-deployment-9898d9674 3 3 3 6m6s
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-deployment-9898d9674-2mrdl 1/1 Running 0 6m31s
nginx-deployment-9898d9674-ndblx 1/1 Running 0 6m31s
nginx-deployment-9898d9674-t8ngw 1/1 Running 0 6m31s
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get deployment -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
nginx-deployment 3/3 3 3 7m31s nginx nginx:1.12.2 app=nginx

# Update the nginx image
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl set image deployment nginx-deployment nginx=nginx:1.13
deployment.extensions/nginx-deployment image updated
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 3/3 3 3 12m
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get deployment -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
nginx-deployment 3/3 3 3 12m nginx nginx:1.13 app=nginx
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get rs
NAME DESIRED CURRENT READY AGE
nginx-deployment-9898d9674 0 0 0 13m
nginx-deployment-d8d99448f 3 3 3 3m42s
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-deployment-d8d99448f-fzfdw 1/1 Running 0 3m59s
nginx-deployment-d8d99448f-l8m8w 1/1 Running 0 72s
nginx-deployment-d8d99448f-pbnml 1/1 Running 0 73s

# Roll back to the revision before the last update
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl rollout history deployment nginx-deployment
deployment.extensions/nginx-deployment
REVISION CHANGE-CAUSE
1 <none>
2 <none>
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl rollout undo deployment nginx-deployment
deployment.extensions/nginx-deployment rolled back
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get deployment -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
nginx-deployment 3/3 3 3 15m nginx nginx:1.12.2 app=nginx
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl rollout history deployment nginx-deployment
deployment.extensions/nginx-deployment
REVISION CHANGE-CAUSE
2 <none>
3 <none>

# Expose the nginx container port on the host (node)
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
minikube Ready <none> 23h v1.9.4 192.168.99.100 <none> Buildroot 2017.11 4.9.64 docker://17.9.0
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl expose deployment nginx-deployment --type=NodePort
service/nginx-deployment exposed
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 23h
nginx-deployment NodePort 10.98.51.33 <none> 80:30233/TCP 68s
# Open http://192.168.99.100:30233/ in a browser
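# The NodePort can also be checked from the host with curl, using the node IP and port shown above:
yujiangdeMBP-13:k8s_yaml yujiang$ curl http://192.168.99.100:30233/
# should return the "Welcome to nginx!" page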

Experiment 5 (kubectl auto-completion)

https://kubernetes.io/docs/tasks/tools/install-kubectl/
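
A minimal sketch of enabling completion (for bash this assumes the bash-completion package is installed; reload the shell afterwards):
# bash
echo 'source <(kubectl completion bash)' >> ~/.bash_profile
# zsh
echo 'source <(kubectl completion zsh)' >> ~/.zshrc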

kubeadm

# 1. Create the kubernetes VMs, disable the firewall, and disable SELinux (run on all nodes)
systemctl disable firewalld.service && systemctl stop firewalld.service
setenforce 0
# 2. Allow bridged traffic through iptables, then install Docker and the kubernetes packages (all nodes)
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl --system

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache

yum install -y https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-selinux-17.03.1.ce-1.el7.centos.noarch.rpm

yum install -y docker-ce-<17.03.1.ce-1.el7.centos>

systemctl enable docker.service
systemctl start docker.service

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubelet-1.11.0-0 kubeadm-1.11.0-0 kubectl-1.11.0-0
systemctl enable kubelet.service
systemctl start kubelet.service

vi /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=cgroupfs --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"

systemctl daemon-reload
systemctl restart kubelet.service

docker pull quay.io/calico/typha:v0.7.4
docker pull quay.io/calico/node:v3.1.3
docker pull quay.io/calico/cni:v3.1.3

vi kubeadm-master.config
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
kubernetesVersion: v1.11.0
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
api:
  advertiseAddress: <10.0.90.217>
controllerManagerExtraArgs:
  node-monitor-grace-period: 10s
  pod-eviction-timeout: 10s
networking:
  podSubnet: <10.211.0.0>/16
  serviceSubnet: <10.96.0.0>/16
kubeProxy:
  config:
    mode: iptables

kubeadm config images pull --config kubeadm-master.config

# This command prints a join token; save that token.
kubeadm init --config kubeadm-master.config
kubeadm reset
swapoff -a
sudo systemctl status kubelet.service

kubectl get pod --all-namespaces -o wide

# Add a node (run the join command on each worker)
kubeadm join 192.168.56.201:6443 --token mzbbw0.5z6zpgauylva58na --discovery-token-ca-cert-hash sha256:932a24f271bd2c14cb41d6698e9781c1fcede848b7b9fe4a0731b9c54f275df2
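
After kubeadm init succeeds, kubectl on the master still needs a kubeconfig, and the join command can be regenerated if the token above is lost. These are the standard kubeadm steps, shown here as a reminder rather than output captured from this session:
# on the master, as the user that will run kubectl
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# print a fresh join command if the token expired or was lost
kubeadm token create --print-join-command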

Vagrantfile

yujiangdeMacBook-Pro-13:docker-k8s yujiang$ cat Vagrantfile 
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.require_version ">= 1.6.0"
boxes = [
{
:name => "k8s-node-1",
:eth1 => "192.168.56.61",
:mem => "2048",
:cpu => "2"
},
{
:name => "k8s-node-2",
:eth1 => "192.168.56.62",
:mem => "2048",
:cpu => "2"
},
]

Vagrant.configure(2) do |config|
config.vm.box = "centos/centos7"
boxes.each do |opts|
config.vm.define opts[:name] do |config|
config.vm.hostname = opts[:name]
config.vm.provider "vmware_fusion" do |v|
v.vmx["memsize"] = opts[:mem]
v.vmx["numvcpus"] = opts[:cpu]
end

config.vm.provider "virtualbox" do |v|
v.customize ["modifyvm", :id, "--memory", opts[:mem]]
v.customize ["modifyvm", :id, "--cpus", opts[:cpu]]
end
config.vm.network :private_network, ip:opts[:eth1]
end
end

config.vm.provision "shell", inline: <<-SHELL
# install dependent
systemctl disable firewalld.service && systemctl stop firewalld.service && setenforce 0
sed -i "s/\(SELINUX=\).*/\1disabled/g" /etc/selinux/config
# close swapoff
swapoff -a

# install docker
cat <<-'EOF' > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf

yum install -y ipset ipvsadm
cat <<-'EOF' > /etc/sysconfig/modules/ipvs.modules
\#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

yum install -y yum-utils device-mapper-persistent-data lvm2 && yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo && yum makecache && yum install -y https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-selinux-17.03.1.ce-1.el7.centos.noarch.rpm && yum install -y docker-ce-17.03.1.ce-1.el7.centos
systemctl enable docker.service && systemctl start docker.service

# install k8s
cat <<-'EOF' > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubelet-1.11.0-0 kubeadm-1.11.0-0 kubectl-1.11.0-0

cat <<-'EOF' > /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=cgroupfs --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"
EOF

systemctl enable kubelet.service && systemctl start kubelet.service

docker pull quay.io/calico/typha:v0.7.4
docker pull quay.io/calico/node:v3.1.3
docker pull quay.io/calico/cni:v3.1.3

SHELL
end
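
To bring up and enter the two nodes defined by this Vagrantfile (basic Vagrant usage, not part of the original notes):
yujiangdeMacBook-Pro-13:docker-k8s yujiang$ vagrant up
yujiangdeMacBook-Pro-13:docker-k8s yujiang$ vagrant ssh k8s-node-1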

Build and install from source

Clone the nginx project and nginx-rtmp-module

[root@cephJ ~]# yum install gcc pcre-devel openssl-devel epel-release -y

[root@cephJ ~]# rpm --import http://li.nux.ro/download/nux/RPM-GPG-KEY-nux.ro
[root@cephJ ~]# rpm -Uvh http://li.nux.ro/download/nux/dextop/el7/x86_64/nux-dextop-release-0-5.el7.nux.noarch.rpm
[root@cephJ ~]# yum -y install ffmpeg ffmpeg-devel

[root@cephJ ~]# cd github/
[root@cephJ github]# git clone https://github.com/nginx/nginx.git
[root@cephJ github]# cd nginx
[root@cephJ github]# git checkout -b myrelease-1.15.6 release-1.15.6

[root@cephJ github]# git clone https://github.com/arut/nginx-rtmp-module.git
[root@cephJ github]# cd nginx-rtmp-module
[root@cephJ github]# git checkout -b myv1.2.1 v1.2.1

[root@cephJ ~]# tree github/ -L 1
github/
├── nginx
└── nginx-rtmp-module

[root@cephJ ~]# cd github/nginx
[root@cephJ nginx]# ./auto/configure --prefix=/usr/local/nginx --add-module=../nginx-rtmp-module/ --with-http_ssl_module --with-debug
[root@cephJ nginx]# make -j 8
[root@cephJ nginx]# make install
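
To confirm that the rtmp module was compiled in, the configure arguments can be inspected (a quick verification step, not part of the original session):
[root@cephJ nginx]# /usr/local/nginx/sbin/nginx -V
# the output should list --add-module=../nginx-rtmp-module/ among the configure arguments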

Configure nginx

[root@cephJ ~]# vim /usr/local/nginx/conf/nginx.conf
# Add the following configuration
rtmp {                    # RTMP service
    server {
        listen 1935;      # service port
        chunk_size 4096;  # chunk size for data transfer
        application vod {
            play /root/videos/;   # directory where the video files are stored
        }
    }
}


Start nginx
[root@cephJ ~]# /usr/local/nginx/sbin/nginx
[root@cephJ ~]# ps -ef | grep nginx
root 20015 1 0 11:24 ? 00:00:00 nginx: master process /usr/local/nginx/sbin/nginx
nobody 20016 20015 0 11:24 ? 00:00:00 nginx: worker process
root 20019 12686 0 11:24 pts/2 00:00:00 grep --color=auto nginx
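
To play a file over RTMP you can use ffplay from the ffmpeg package installed earlier (a usage sketch; sample.flv is a hypothetical file that must exist in /root/videos/):
[root@cephJ ~]# ffplay rtmp://127.0.0.1/vod/sample.flv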


Concepts

FFmpeg

FFmpeg is a collection of libraries and tools for processing multimedia such as audio, video, subtitles, and related metadata. (FF stands for Fast Forward.)

Related links:
【1】https://zh.wikipedia.org/wiki/FFmpeg
【2】https://github.com/FFmpeg/FFmpeg

  • read_ahead_kb

Defines the maximum number of kilobytes (KB) the OS may read ahead during a sequential read operation, so that data likely to be needed for the next sequential read is already in the kernel page cache, improving read I/O performance.

Device mappers often benefit from a high read_ahead_kb value. 128 KB per device to be mapped is a good starting point, but raising read_ahead_kb to 4-8 MB may improve performance for applications that sequentially read large files.
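
As a concrete illustration, read_ahead_kb is exposed per block device under sysfs (sda is just an example device name):
# view the current readahead value
cat /sys/block/sda/queue/read_ahead_kb
# raise it to 4 MB
echo 4096 | sudo tee /sys/block/sda/queue/read_ahead_kb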

Course introduction

  • Introduction to the Go language
  • Overview of the beego framework
  • Setting up the beego environment
  • Hands-on application: a "guess the movie from a picture" project
  • Project deployment and release
  • Project summary

Introduction to Go

  • Concise, compact syntax
    • Consistent syntax
    • Little noise
  • Simple concurrency
    • Concurrency abstracted at the language level
    • Clean and elegant
  • Both fast execution and high development efficiency
    • Compiled language
    • Concise syntax

Environment setup

Installing beego

  • Install beego
yujiangdeMBP-13:~ yujiang$ go get -u -v github.com/astaxie/beego
  • Install the bee tool

What is the bee tool?

A toolkit created to help develop beego projects quickly (beego project creation, hot compilation, development testing, deployment).

yujiangdeMBP-13:~ yujiang$ go get -u -v github.com/beego/bee

yujiangdeMBP-13:~ yujiang$ bee version
______
| ___ \
| |_/ / ___ ___
| ___ \ / _ \ / _ \
| |_/ /| __/| __/
\____/ \___| \___| v1.10.0

├── Beego : 1.10.1
├── GoVersion : go1.10
├── GOOS : darwin
├── GOARCH : amd64
├── NumCPU : 4
├── GOPATH : /Users/yujiang/go
├── GOROOT : /usr/local/go
├── Compiler : gc
└── Date : Sunday, 28 Oct 2018

Overview of the beego framework

Features

  • Rapid development

  • MVC architecture

  • Complete documentation and an active community

Using the bee tool

  • bee new: create a new project skeleton

  • bee run: compile and deploy automatically

  • bee generate: generate code automatically

yujiangdeMBP-13:~ yujiang$ bee new imooc
yujiangdeMBP-13:imooc yujiang$ bee run
Open http://localhost:8080/ in a browser

yujiangdeMBP-13:imooc yujiang$ tree
.
├── conf
│   └── app.conf
├── controllers
│   └── default.go
├── main.go
├── models
├── routers
│   └── router.go
├── static
│   ├── css
│   ├── img
│   └── js
│   └── reload.min.js
├── tests
│   └── default_test.go
└── views
└── index.tpl

10 directories, 7 files

Basic flow of a beego project

File - main.go
package main

import (
    _ "imooc/routers" // the blank import only runs the init() function of imooc/routers
    "github.com/astaxie/beego"
)

func main() {
    beego.Run()
}

File - routers/router.go
package routers

import (
    "imooc/controllers"
    "github.com/astaxie/beego"
)

func init() {
    // map the root path to MainController
    beego.Router("/", &controllers.MainController{})
}

File - controllers/default.go
package controllers

import (
    "github.com/astaxie/beego"
)

type MainController struct {
    beego.Controller
}

func (c *MainController) Get() {
    // this is where GET requests are handled
    c.Data["Website"] = "beego.me"        // data passed to the template
    c.Data["Email"] = "astaxie@gmail.com" // data passed to the template
    c.TplName = "index.tpl"               // template file to render
}

Passing a parameter to beego from the browser: (http://localhost:8080/?name="yujiang")
package controllers

import (
    "github.com/astaxie/beego"
)

type MainController struct {
    beego.Controller
}

func (c *MainController) Get() {
    name := c.GetString("name")
    //c.Data["Website"] = "beego.me"
    c.Data["Website"] = name
    c.Data["Email"] = "astaxie@gmail.com"
    c.TplName = "index.tpl"
}
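
A quick way to exercise the parameterized handler above, assuming bee run is still serving on port 8080:
yujiangdeMBP-13:imooc yujiang$ curl "http://localhost:8080/?name=yujiang"
# the rendered index.tpl now shows "yujiang" where the Website value is used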

How beego interacts with the database

[root@localhost ~]# yum install mariadb-server -y
[root@localhost ~]# systemctl start mariadb
[root@localhost ~]# systemctl enable mariadb
[root@localhost ~]# mysql -u root -p

MariaDB [(none)]> create database imooc;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> use imooc;
MariaDB [imooc]> CREATE TABLE `user` ( `id` int(11) NOT NULL AUTO_INCREMENT, `name` varchar(128) NOT NULL DEFAULT '', `gender` tinyint(4) NOT NULL DEFAULT '0', `age` int(11) NOT NULL DEFAULT '0', PRIMARY KEY (`id`) )ENGINE=InnoDB DEFAULT CHARSET=utf8;
Query OK, 0 rows affected (0.00 sec)

MariaDB [imooc]> INSERT INTO user (name,gender,age) values('zhangsan',1,21);
Query OK, 1 row affected (0.00 sec)
MariaDB [imooc]> INSERT INTO user (name,gender,age) values('lisi',0,22);
Query OK, 1 row affected (0.01 sec)
MariaDB [imooc]> INSERT INTO user (name,gender,age) values('wangwu',1,20);
Query OK, 1 row affected (0.00 sec)

MariaDB [imooc]> select * from user;
+----+----------+--------+-----+
| id | name | gender | age |
+----+----------+--------+-----+
| 1 | zhangsan | 1 | 21 |
| 2 | lisi | 0 | 22 |
| 3 | wangwu | 1 | 20 |
+----+----------+--------+-----+
3 rows in set (0.00 sec)
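
The bee generate command and the orm.RegisterDataBase call below connect to MariaDB from another host (192.168.56.x), and by default the root account is usually limited to localhost. If the connection is refused, granting remote access is one option (a hedged sketch; the 'root' password mirrors the DSN used in main.go below and should be adjusted to your own policy):
[root@localhost ~]# mysql -u root -p -e "GRANT ALL PRIVILEGES ON imooc.* TO 'root'@'%' IDENTIFIED BY 'root'; FLUSH PRIVILEGES;"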

Generate scaffold code
yujiangdeMBP-13:imooc yujiang$ bee generate scaffold user -fields="id:int64,name:string,gender:int,age:int" -driver=mysql -conn="root:@tcp(192.168.56.101:3306)/imooc"
______
| ___ \
| |_/ / ___ ___
| ___ \ / _ \ / _ \
| |_/ /| __/| __/
\____/ \___| \___| v1.10.0
2018/10/28 19:30:25 INFO ▶ 0001 Do you want to create a 'user' model? [Yes|No]
Yes
2018/10/28 19:31:34 INFO ▶ 0002 Using 'User' as model name
2018/10/28 19:31:34 INFO ▶ 0003 Using 'models' as package name
create /Users/yujiang/go/src/imooc/models/user.go
2018/10/28 19:31:34 INFO ▶ 0004 Do you want to create a 'user' controller? [Yes|No]
Yes
2018/10/28 19:31:43 INFO ▶ 0005 Using 'User' as controller name
2018/10/28 19:31:43 INFO ▶ 0006 Using 'controllers' as package name
2018/10/28 19:31:43 INFO ▶ 0007 Using matching model 'User'
create /Users/yujiang/go/src/imooc/controllers/user.go
2018/10/28 19:31:43 INFO ▶ 0008 Do you want to create views for this 'user' resource? [Yes|No]
Yes
2018/10/28 19:31:52 INFO ▶ 0009 Generating view...
create /Users/yujiang/go/src/imooc/views/user/index.tpl
create /Users/yujiang/go/src/imooc/views/user/show.tpl
create /Users/yujiang/go/src/imooc/views/user/create.tpl
create /Users/yujiang/go/src/imooc/views/user/edit.tpl
2018/10/28 19:31:52 INFO ▶ 0010 Do you want to create a 'user' migration and schema for this resource? [Yes|No]
No
2018/10/28 19:32:11 INFO ▶ 0011 Do you want to migrate the database? [Yes|No]
No
2018/10/28 19:32:13 SUCCESS ▶ 0012 All done! Don't forget to add beego.Router("/user" ,&controllers.UserController{}) to routers/route.go

2018/10/28 19:32:13 SUCCESS ▶ 0013 Scaffold successfully generated!

yujiangdeMBP-13:imooc yujiang$ tree
.
├── conf
│   └── app.conf
├── controllers
│   ├── default.go
│   └── user.go
├── imooc
├── main.go
├── models
│   └── user.go
├── routers
│   └── router.go
├── static
│   ├── css
│   ├── img
│   └── js
│   └── reload.min.js
├── tests
│   └── default_test.go
└── views
├── index.tpl
└── user
├── create.tpl
├── edit.tpl
├── index.tpl
└── show.tpl

11 directories, 14 files

Install go-sql-driver

yujiangdeMBP-13:~ yujiang$ go get -v -u github.com/go-sql-driver/mysql

File - routers/router.go
package routers

import (
    "github.com/astaxie/beego"
    "imooc/controllers"
)

func init() {
    //beego.Router("/", &controllers.MainController{})
    beego.Include(&controllers.UserController{})
}

File - main.go
package main

import (
    _ "imooc/routers"
    "github.com/astaxie/beego"
    "github.com/astaxie/beego/orm"
    _ "github.com/go-sql-driver/mysql"
)

func main() {
    orm.RegisterDataBase("default", "mysql", "root:root@tcp(192.168.56.101:3306)/imooc")
    beego.Run()
}

yujiangdeMBP-13:imooc yujiang$ bee run
______
| ___ \
| |_/ / ___ ___
| ___ \ / _ \ / _ \
| |_/ /| __/| __/
\____/ \___| \___| v1.10.0
2018/10/28 20:31:44 INFO ▶ 0001 Using 'imooc' as 'appname'
2018/10/28 20:31:44 INFO ▶ 0002 Initializing watcher...
imooc
2018/10/28 20:31:46 SUCCESS ▶ 0003 Built Successfully!
2018/10/28 20:31:46 INFO ▶ 0004 Restarting 'imooc'...
2018/10/28 20:31:46 SUCCESS ▶ 0005 './imooc' is running...
2018/10/28 20:31:46.697 [I] [router.go:269] /Users/yujiang/go/src/imooc/controllers no changed
2018/10/28 20:31:46.719 [I] [asm_amd64.s:2361] http server Running on http://:8080
2018/10/28 20:31:55.475 [D] [server.go:2694] | ::1| 200 | 3.979416ms| match| GET / r:/

Hands-on practice

CBT - The Ceph Benchmarking Tool

INTRODUCTION

CBT is a testing tool written in python that automates tasks related to testing Ceph cluster performance. CBT does not install Ceph packages; that should be done before using CBT. CBT can create OSDs at the start of a test run, recreate OSDs during a test run, or simply run against an existing cluster. CBT records system metrics with collectl, and can optionally collect more information using a variety of tools including perf, blktrace, and valgrind. In addition to basic benchmarks, CBT can also run advanced tests, including automated OSD outages, erasure coded pools, and cache tier configurations. The main benchmark modules are explained below.

radosbench

The RADOS benchmark uses the rados binary that ships with the ceph-common package. It contains a benchmarking tool that exercises the cluster through librados, the low-level object storage API provided by Ceph. Currently, the RADOS benchmark module creates a pool for each client.

librbdfio

The librbdfio benchmark module is the simplest way to test the block storage performance of a Ceph cluster. Recent releases of the flexible IO tester (fio) provide an RBD engine. This allows fio to test the block storage performance of RBD volumes through the userland librbd libraries without configuring KVM/QEMU. These are the same libraries used by the QEMU backend, so it gives a good approximation of KVM/QEMU performance.

kvmrbdfio

The kvmrbdfio benchmark uses the flexible IO tester (fio) to exercise an RBD volume that has been attached to a KVM instance. It requires the instance to be created and the RBD volumes attached before CBT is used. This module is used to benchmark RBD volumes created via Cinder and attached to OpenStack instances. Alternatively, the instances can be provisioned with Vagrant or Virtual Machine Manager.

rbdfio

The rbdfio benchmark uses the flexible IO tester (fio) to exercise an RBD volume mapped to a block device with the KRBD kernel driver. This module is best suited to simulating applications that use a block device directly rather than running inside a virtual machine.

PREREQUISITES

CBT uses several libraries and tools to run:

  1. python-yaml - YAML library for python, used to read configuration files.
  2. python-lxml - a powerful Pythonic XML processing library combining libxml2/libxslt with the ElementTree API
  3. ssh (and scp) - remote secure command execution and data transfer
  4. pdsh (and pdcp) - parallel ssh and scp implementations
  5. ceph - a scalable distributed storage system

Note that pdsh is currently not packaged for RHEL 7 and CentOS 7 based distributions. RPM packages for them are available here:

Optional tools and benchmarks can be used if desired:

  1. collectl - system data collection
  2. blktrace - block device IO tracing
  3. seekwatcher - create graphs from blktrace data
  4. perf - system and process profiling
  5. valgrind - runtime memory and cpu profiling of specific processes
  6. fio - benchmark suite with integrated posix, libaio, and librbd support
  7. cosbench - object storage benchmark from Intel

USER AND NODE SETUP

In addition to the software above, a number of nodes must be available to run the tests. They are divided into several categories; multiple categories can contain the same host if it takes on multiple roles (for example, running both an OSD and a mon).

  1. head - node where general ceph commands are run
  2. clients - nodes that run benchmarks or other client tools
  3. osds - nodes where OSDs live
  4. rgws - rgw server nodes
  5. mons - mon nodes

A user may also be specified to run all remote commands. The host used to run cbt must be able to issue passwordless ssh commands as that user, which can be done by creating a passwordless ssh key:

ssh-keygen -t dsa

and copying the public key generated in ~/.ssh into the ~/.ssh/authorized_keys file on all remote hosts.

The user must also be able to run certain commands with sudo. The easiest way to achieve this is simply to enable global passwordless sudo access for that user, though this is only appropriate for lab environments. It can be done by running visudo and adding something like:

# passwordless sudo for cbt
<user> ALL=(ALL) NOPASSWD:ALL

where <user> is the user that will have passwordless sudo access.

See your operating system documentation for the specific details.

In addition to the above, all of the osds and mons need to be present in ssh's list of known hosts (and be resolvable, e.g. via the /etc/hosts file) for things to execute properly; otherwise the benchmarks will not run.

Note that pdsh may have trouble if the sudoers file requires a tty. If that is the case, commenting out the Defaults requiretty line via visudo is recommended.

DISK PARTITIONING

Currently, CBT looks for specific partition labels in /dev/disk/by-partlabel for the Ceph OSD data and journal partitions. At some point in the future this will become more flexible, but for now it is the expected behavior. Partitions on each OSD host should be labeled with the following gpt labels:

osd-device-<num>-data
osd-device-<num>-journal

where <num> is a device sequence starting at 0 and ending with the last device on the system. Currently, cbt assumes that all nodes in the system have the same number of devices. A script showing an example of how we create the partition labels in our test environment is available here:

https://github.com/ceph/cbt/blob/master/tools/mkpartmagna.sh
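
For reference, a minimal sketch of creating such labels by hand with parted (the device /dev/sdb and the sizes are made-up examples; the mkpartmagna.sh script above is the authoritative reference):
parted -s /dev/sdb mklabel gpt
parted -s /dev/sdb mkpart osd-device-0-journal 1MiB 10GiB
parted -s /dev/sdb mkpart osd-device-0-data 10GiB 100%
ls /dev/disk/by-partlabel/   # should now list osd-device-0-data and osd-device-0-journal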

CREATING A YAML FILE

CBT yaml files have a basic structure in which you define a cluster and a set of benchmarks to run against it. For example, the yaml file below creates a single-node cluster on a node with the hostname "burnupiX". A pool profile is defined for a 1x replication pool using 256 PGs, and that pool is used to run RBD performance tests with fio and the librbd engine.

cluster:
  user: 'nhm'
  head: "burnupiX"
  clients: ["burnupiX"]
  osds: ["burnupiX"]
  mons:
    burnupiX:
      a: "127.0.0.1:6789"
  osds_per_node: 1
  fs: 'xfs'
  mkfs_opts: '-f -i size=2048'
  mount_opts: '-o inode64,noatime,logbsize=256k'
  conf_file: '/home/nhm/src/ceph-tools/cbt/newstore/ceph.conf.1osd'
  iterations: 1
  use_existing: False
  clusterid: "ceph"
  tmp_dir: "/tmp/cbt"
  pool_profiles:
    rbd:
      pg_size: 256
      pgp_size: 256
      replication: 1
benchmarks:
  librbdfio:
    time: 300
    vol_size: 16384
    mode: [read, write, randread, randwrite]
    op_size: [4194304, 2097152, 1048576]
    concurrent_procs: [1]
    iodepth: [64]
    osd_ra: [4096]
    cmd_path: '/home/nhm/src/fio/fio'
    pool_profile: 'rbd'

An associated ceph.conf.1osd file is also defined, containing the various settings to be used in this test:

[global]
osd pool default size = 1
auth cluster required = none
auth service required = none
auth client required = none
keyring = /tmp/cbt/ceph/keyring
osd pg bits = 8
osd pgp bits = 8
log to syslog = false
log file = /tmp/cbt/ceph/log/$name.log
public network = 192.168.10.0/24
cluster network = 192.168.10.0/24
rbd cache = true
osd scrub load threshold = 0.01
osd scrub min interval = 137438953472
osd scrub max interval = 137438953472
osd deep scrub interval = 137438953472
osd max scrubs = 16
filestore merge threshold = 40
filestore split multiple = 8
osd op threads = 8
mon pg warn max object skew = 100000
mon pg warn min per osd = 0
mon pg warn max per osd = 32768

[mon]
mon data = /tmp/cbt/ceph/mon.$id

[mon.a]
host = burnupiX
mon addr = 127.0.0.1:6789

[osd.0]
host = burnupiX
osd data = /tmp/cbt/mnt/osd-device-0-data
osd journal = /dev/disk/by-partlabel/osd-device-0-journal

To run this benchmark suite, cbt is launched with arguments specifying the archive directory where results are stored and the configuration file to use:

cbt.py --archive=<archive dir> ./mytests.yaml

You can also specify the ceph.conf file to use on the command line:

cbt.py --archive=<archive dir> --conf=./ceph.conf.1osd ./mytests.yaml

In this way, ceph.conf files and yaml test files can be mixed and matched to create parameterized sweeps of tests. A script called mkcephconf.py in the tools directory can automatically generate hundreds or thousands of ceph.conf files from user-defined option ranges, which can then be used with cbt in this fashion.

CONCLUSION

There are many other powerful ways to use cbt that are not yet covered in this document. Over time we will try to provide better examples and documentation for these features. For now, it is best to look at the examples, look at the code, and ask questions!

Experiment

This experiment does not use the nhm account; everything below is run as root, so replace nhm with root when setting up the passwordless ssh trust.

cbt node
1. Install pdsh
[root@cbt ~]# yum -y install epel-release
[root@cbt ~]# yum install -y pdsh
Set up passwordless ssh to the ceph node
[root@cbt ~]# ssh-keygen
[root@cbt cbt]# ssh-copy-id nhm@192.168.0.45
Test
[root@cbt ~]# pdsh -w 192.168.0.45 -l nhm uptime
192.168.0.45: 14:28:43 up 32 min, 1 user, load average: 0.00, 0.01, 0.05
2. Install pip
[root@cbt ~]# curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py"
[root@cbt ~]# python get-pip.py
[root@cbt ~]# python -m pip install -U pip
[root@cbt ~]# pip install --upgrade setuptools
3. Install the python dependencies
[root@cbt cbt]# pip install lxml numpy PyYAML
4. Configure the /etc/hosts file
[root@cbt cbt]# cat /etc/hosts
192.168.0.45 cbt-ceph-1
192.168.0.46 cbt-ceph-2
192.168.0.47 cbt-ceph-3


ceph nodes
[root@cbt-ceph-1 ~]# yum -y install epel-release
[root@cbt-ceph-1 ~]# yum -y install collectl perf blktrace
[root@cbt-ceph-1 ~]# useradd -m nhm -g root -G root
[root@cbt-ceph-1 ~]# echo nhm:nhm | chpasswd
[root@cbt-ceph-1 ~]# vi /etc/sudoers
Add:
nhm ALL=(ALL) NOPASSWD: ALL

[root@cbt-ceph-1 ~]# yum -y install epel-release
[root@cbt-ceph-1 ~]# yum install -y pdsh
[root@cbt-ceph-1 ~]# yum install -y fio



Single-node radosbench config file for cbt
[root@cbt ~]# cat /root/mytests.yaml
cluster:
  user: 'root'
  head: "cbt-ceph-1"
  clients: ["cbt-ceph-1"]
  osds: ["cbt-ceph-1"]
  mons: ["cbt-ceph-1"]
  osds_per_node: 1
  fs: 'xfs'
  mkfs_opts: '-f -i size=2048'
  mount_opts: '-o inode64,noatime,logbsize=256k'
  conf_file: '/etc/ceph/ceph.conf'
  iterations: 1
  use_existing: True
  version_compat: jewel
  clusterid: "ceph"
  tmp_dir: "/tmp/cbt"
  pool_profiles:
    rbd:
      pg_size: 256
      pgp_size: 256
      replication: 1
benchmarks:
  radosbench:
    op_size: [ 4194304, 524288, 4096 ]
    write_only: False
    time: 5
    concurrent_ops: [ 128 ]
    concurrent_procs: 1
    use_existing: True
    pool_profile: replicated


Single-node librbdfio config file for cbt
[root@cbt cbt]# cat /root/mytests.yaml
cluster:
  user: 'root'
  head: "cbt-ceph-1"
  clients: ["cbt-ceph-1"]
  osds: ["cbt-ceph-1"]
  mons: ["cbt-ceph-1"]
  osds_per_node: 1
  fs: 'xfs'
  mkfs_opts: '-f -i size=2048'
  mount_opts: '-o inode64,noatime,logbsize=256k'
  conf_file: '/etc/ceph/ceph.conf'
  iterations: 1
  use_existing: True
  version_compat: jewel
  clusterid: "ceph"
  tmp_dir: "/tmp/cbt"
  pool_profiles:
    rbd:
      pg_size: 256
      pgp_size: 256
      replication: 1
benchmarks:
  librbdfio:
    time: 300
    vol_size: 16384
    mode: [read, write, randread, randwrite]
    op_size: [4194304, 2097152, 1048576]
    concurrent_procs: [1]
    iodepth: [64]
    osd_ra: [4096]
    pool_profile: 'rbd'

Run cbt
[root@cbt cbt]# python cbt.py --archive=yujiangresult /root/mytests.yaml