[root@node2 ~]# docker ps
CONTAINER ID   IMAGE          COMMAND                  CREATED          STATUS          PORTS     NAMES
4bdcce0ee63e   nginx:latest   "nginx -g 'daemon off"   22 minutes ago   Up 22 minutes   80/tcp    my-test.1.8433fuiy7vpu0p80arl7vggfe
[root@node2 ~]# docker exec -ti 4bdcce0ee63e /bin/bash
root@4bdcce0ee63e:/# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
1786: eth0@if1787: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
    link/ether 02:42:0a:ff:00:08 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 10.255.0.8/16 scope global eth0
       valid_lft forever preferred_lft forever
    inet 10.255.0.6/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:aff:feff:8/64 scope link
       valid_lft forever preferred_lft forever
1788: eth1@if1789: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 02:42:ac:12:00:03 brd ff:ff:ff:ff:ff:ff link-netnsid 1
    inet 172.18.0.3/16 scope global eth1
       valid_lft forever preferred_lft forever
    inet6 fe80::42:acff:fe12:3/64 scope link
       valid_lft forever preferred_lft forever
1791: eth2@if1792: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
    link/ether 02:42:0a:0a:13:04 brd ff:ff:ff:ff:ff:ff link-netnsid 2
    inet 10.10.19.4/24 scope global eth2
       valid_lft forever preferred_lft forever
    inet 10.10.19.2/32 scope global eth2
       valid_lft forever preferred_lft forever
    inet6 fe80::42:aff:fe0a:1304/64 scope link
       valid_lft forever preferred_lft forever
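Each of the three interfaces above belongs to a different Docker network. As a rough mapping (an assumption based on the default subnets, not stated in the original): eth2 on 10.10.19.0/24 matches the ngx_net overlay used in this walkthrough, while eth0 on 10.255.0.0/16 is typically the swarm ingress network and eth1 on 172.18.0.0/16 the docker_gwbridge. You can confirm the subnet of each network from the host:
[root@node2 ~]# docker network inspect --format '{{json .IPAM.Config}}' ngx_net
[root@node2 ~]# docker network inspect --format '{{json .IPAM.Config}}' ingress
[root@node2 ~]# docker network inspect --format '{{json .IPAM.Config}}' docker_gwbridge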
root@4bdcce0ee63e:/# ping 10.10.19.3
PING 10.10.19.3 (10.10.19.3): 56 data bytes
64 bytes from 10.10.19.3: icmp_seq=0 ttl=64 time=0.890 ms
64 bytes from 10.10.19.3: icmp_seq=1 ttl=64 time=0.622 ms
--- 10.10.19.3 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.622/0.756/0.890/0.134 ms
root@4bdcce0ee63e:/# ping 10.10.19.6
PING 10.10.19.6 (10.10.19.6): 56 data bytes
64 bytes from 10.10.19.6: icmp_seq=0 ttl=64 time=0.939 ms
64 bytes from 10.10.19.6: icmp_seq=1 ttl=64 time=0.590 ms
---------------------------- Service discovery in swarm mode --------------------------
By default, when a service is created and attached to a network, the swarm assigns the service a VIP. The VIP is mapped to a DNS entry based on the service name. Containers on that network share the service's DNS mapping, so any container on the network can reach the service by its service name.
Within the same overlay network, no port mapping is needed for one service to reach another. Swarm's internal load balancer automatically sends requests to the service's VIP and then distributes them across all active tasks.
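You can also see the VIP that the swarm assigned to a service with docker service inspect (a minimal sketch, assuming the my-test service created earlier; the Addr field should show the same 10.10.19.2 VIP that the nslookup output further below returns):
[root@manager-node ~]# docker service inspect --format '{{json .Endpoint.VirtualIPs}}' my-test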
For example, add a centos service to the same network; it can then reach the nginx service created earlier by the name my-test:
[root@manager-node ~]# docker service create --name my-centos --network ngx_net centos
Check which node the centos service is running on (after the create command above, it takes a while for the centos service to finish being created):
[root@manager-node ~]# docker service ps my-centos
ID                         NAME         IMAGE   NODE   DESIRED STATE  CURRENT STATE            ERROR
e03pqgkjs3l1qizc6v4aqaune  my-centos.1  centos  node2  Running        Preparing 4 seconds ago
Log in to the node where the centos task is running (node2, as shown above) and open an interactive shell into the centos container:
[root@node2 ~]# docker ps
CONTAINER ID   IMAGE           COMMAND       CREATED             STATUS              NAMES
e4554490d891   centos:latest   "/bin/bash"   About an hour ago   Up About an hour    my-centos.1.9yk5ie28gwk9mw1h1jovb68ki
[root@node2 ~]# docker exec -ti my-centos.1.9yk5ie28gwk9mw1h1jovb68ki /bin/bash
root@e4554490d891:/# nslookup my-test
Server:    127.0.0.11
Address 1: 127.0.0.11

Name:      my-test
Address 1: 10.10.19.2
From inside the centos container, use the special tasks.<service-name> DNS query to find the IP addresses of all containers backing the my-test service:
root@e4554490d891:/# nslookup tasks.my-test
Server:    127.0.0.11
Address 1: 127.0.0.11

Name:      tasks.my-test
Address 1: 10.10.19.4 my-test.1.8433fuiy7vpu0p80arl7vggfe
Address 2: 10.10.19.5 my-test.2.f1h7a0vtojv18zrsiw8j0rzaw
Address 3: 10.10.19.6 my-test.3.ex73ifk3jvzw8ukurl8yu7fyq
Address 4: 10.10.19.7 my-test.4.cyu73jd8psupfhken23vvmpud
Address 5: 10.10.19.3 my-test.5.btorxekfix4hcqh4v83dr0tzw
From inside the centos container, use wget to fetch the nginx web page served by the my-test service:
root@e4554490d891:/# wget -O- my-test
Connecting to my-test (10.10.19.2:80)
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
...
Swarm's load balancer automatically routes the HTTP request to the VIP and then to an active task container. It distributes subsequent requests to other active tasks using a round-robin selection algorithm.
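To observe this round-robin distribution yourself, you can repeat the request in a loop (a minimal sketch; with the stock nginx image every task serves the identical page, so to tell the tasks apart you would first have to write a distinguishing index.html into each container, a hypothetical step not shown here):
root@e4554490d891:/# for i in 1 2 3 4 5; do wget -qO- my-test | head -n 4; done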
----------------------------------- Using DNS round-robin for a service -----------------------------
When creating a service, you can configure it to use DNS round-robin directly, without a VIP, by passing the --endpoint-mode dnsrr flag to docker service create. Use this mode when you want to run your own load balancer in front of the service.
For example (note: a service created in DNS round-robin mode cannot publish ports with -p on the command line):
[root@manager-node ~]# docker service create --replicas 3 --name my-dnsrr-nginx --network ngx_net --endpoint-mode dnsrr nginx
[root@manager-node ~]# docker service ps my-dnsrr-nginx
ID                         NAME              IMAGE  NODE          DESIRED STATE  CURRENT STATE          ERROR
65li2zbhxvvoaesndmwjokouj  my-dnsrr-nginx.1  nginx  node1         Running        Running 2 minutes ago
5hjw7wm4xr877879m0ewjciuj  my-dnsrr-nginx.2  nginx  manager-node  Running        Running 2 minutes ago
afo7acduge2qfy60e87liz557  my-dnsrr-nginx.3  nginx  manager-node  Running        Running 2 minutes ago
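You can confirm that the service really uses DNS round-robin rather than a VIP by inspecting its endpoint mode (a minimal sketch; this should print dnsrr for this service and vip for my-test):
[root@manager-node ~]# docker service inspect --format '{{.Spec.EndpointSpec.Mode}}' my-dnsrr-nginx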
When you query DNS by the service name, the DNS service returns the IP addresses of all task containers:
root@e4554490d891:/# nslookup my-dnsrr-nginx
Server:    127.0.0.11
Address 1: 127.0.0.11

Name:      my-dnsrr-nginx
Address 1: 10.10.19.10 my-dnsrr-nginx.3.0sm1n9o8hygzarv5t5eq46okn.my-network
Address 2: 10.10.19.9 my-dnsrr-nginx.2.b3o1uoa8m003b2kk0ytl9lawh.my-network
Address 3: 10.10.19.8 my-dnsrr-nginx.1.55za4c83jq9846rle6eigiq15.my-network
Note: be sure to test VIP connectivity the right way.
Docker officially recommends using dig, nslookup, or another DNS lookup tool to verify access to the service name through DNS. Because the VIP is a logical IP, ping is not the right tool for confirming VIP connectivity.