[vagrant@docker-node1 ~]$ sudo ip netns list
[vagrant@docker-node1 ~]$ sudo ip netns add test1
[vagrant@docker-node1 ~]$ sudo ip netns add test2
[vagrant@docker-node1 ~]$ sudo ip netns list
test2
test1
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip a
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip link set dev lo up
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
[vagrant@docker-node1 ~]$ sudo ip link add veth-test1 type veth peer name veth-test2
[vagrant@docker-node1 ~]$ ip link
5: veth-test2@veth-test1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 96:92:53:64:58:17 brd ff:ff:ff:ff:ff:ff
6: veth-test1@veth-test2: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 3e:20:9c:b8:9c:2b brd ff:ff:ff:ff:ff:ff
[vagrant@docker-node1 ~]$ sudo ip link set veth-test1 netns test1
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip link
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
6: veth-test1@if5: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 3e:20:9c:b8:9c:2b brd ff:ff:ff:ff:ff:ff link-netnsid 0
[vagrant@docker-node1 ~]$ ip link
5: veth-test2@if6: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 96:92:53:64:58:17 brd ff:ff:ff:ff:ff:ff link-netnsid 0
[vagrant@docker-node1 ~]$ sudo ip link set veth-test2 netns test2
[vagrant@docker-node1 ~]$ ip link
(5: veth-test2@if6 no longer shows up in the host namespace either)
[vagrant@docker-node1 ~]$ sudo ip netns exec test2 ip link
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
5: veth-test2@if6: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 96:92:53:64:58:17 brd ff:ff:ff:ff:ff:ff link-netnsid 0
Assign IP addresses
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip addr add 192.168.1.2/24 dev veth-test1
[vagrant@docker-node1 ~]$ sudo ip netns exec test2 ip addr add 192.168.1.3/24 dev veth-test2
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip link set dev veth-test1 up
[vagrant@docker-node1 ~]$ sudo ip netns exec test2 ip link set dev veth-test2 up
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
6: veth-test1@if5: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 3e:20:9c:b8:9c:2b brd ff:ff:ff:ff:ff:ff link-netnsid 1
    inet 192.168.1.2/24 scope global veth-test1
       valid_lft forever preferred_lft forever
    inet6 fe80::3c20:9cff:feb8:9c2b/64 scope link
       valid_lft forever preferred_lft forever
[vagrant@docker-node1 ~]$ sudo ip netns exec test2 ip a
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
5: veth-test2@if6: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 96:92:53:64:58:17 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 192.168.1.3/24 scope global veth-test2
       valid_lft forever preferred_lft forever
    inet6 fe80::9492:53ff:fe64:5817/64 scope link
       valid_lft forever preferred_lft forever
Ping between the two namespaces
[vagrant@docker-node1 ~]$ sudo ip netns exec test1 ping 192.168.1.3
PING 192.168.1.3 (192.168.1.3) 56(84) bytes of data.
64 bytes from 192.168.1.3: icmp_seq=1 ttl=64 time=0.051 ms
[vagrant@docker-node1 ~]$ sudo ip netns exec test2 ping 192.168.1.2
PING 192.168.1.2 (192.168.1.2) 56(84) bytes of data.
64 bytes from 192.168.1.2: icmp_seq=1 ttl=64 time=0.035 ms
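The steps above can be replayed as a single script; this is a sketch that simply reuses the same namespace names, device names and addresses already shown:

#!/bin/bash
# Sketch: recreate the test1/test2 veth setup from the steps above.
set -e

sudo ip netns add test1
sudo ip netns add test2

# One veth pair; move each end into its namespace.
sudo ip link add veth-test1 type veth peer name veth-test2
sudo ip link set veth-test1 netns test1
sudo ip link set veth-test2 netns test2

# Address and bring up both ends.
sudo ip netns exec test1 ip addr add 192.168.1.2/24 dev veth-test1
sudo ip netns exec test2 ip addr add 192.168.1.3/24 dev veth-test2
sudo ip netns exec test1 ip link set dev veth-test1 up
sudo ip netns exec test2 ip link set dev veth-test2 up

# Verify connectivity between the namespaces.
sudo ip netns exec test1 ping -c 3 192.168.1.3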
docker run --name nginx -d -p <host port>:<container port> nginx

[vagrant@docker-node1 ~]$ docker run --name nginx -d -p 80:80 nginx
[vagrant@docker-node1 ~]$ docker ps
CONTAINER ID        IMAGE      COMMAND                  CREATED          STATUS          PORTS                NAMES
82d01fa9a547        nginx      "nginx -g 'daemon of…"   14 seconds ago   Up 13 seconds   0.0.0.0:80->80/tcp   nginx
[vagrant@docker-node1 ~]$ curl 127.0.0.1
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
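With 80:80 the host and container ports happen to be the same, which hides the host:container order. A quick sketch with a different host port makes it visible (the container name web and host port 8080 are made up for illustration):

docker run --name web -d -p 8080:80 nginx   # host 8080 -> container 80
docker port web                             # prints: 80/tcp -> 0.0.0.0:8080
curl 127.0.0.1:8080                         # served by nginx inside the container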
[vagrant@docker-node1 etcd-v3.3.10-linux-amd64]$ ./etcdctl cluster-health
member 14192bed1b668a6 is healthy: got healthy result from http://192.168.56.61:2379
member 80c395b734da48f6 is healthy: got healthy result from http://192.168.56.62:2379
cluster is healthy
[vagrant@docker-node2 etcd-v3.3.10-linux-amd64]$ ./etcdctl cluster-health
member 14192bed1b668a6 is healthy: got healthy result from http://192.168.56.61:2379
member 80c395b734da48f6 is healthy: got healthy result from http://192.168.56.62:2379
cluster is healthy
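cluster-health goes through the v2 API; the same etcd 3.3 binary can also be checked through the v3 API, roughly like this (endpoints taken from the output above):

ETCDCTL_API=3 ./etcdctl --endpoints=http://192.168.56.61:2379,http://192.168.56.62:2379 endpoint health
ETCDCTL_API=3 ./etcdctl --endpoints=http://192.168.56.61:2379,http://192.168.56.62:2379 member list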
#include <stdio.h>

int main() {
    printf("hello world!\n");
    return 0;
}
# Compiling without -static makes docker run fail with the error below. Why? To be looked into later...
# (most likely because the image contains nothing but the binary, so a dynamically
# linked executable cannot find its loader/libc inside the container)
# standard_init_linux.go:190: exec user process caused "no such file or directory"
[vagrant@localhost ~]$ gcc -static helloworld.c -o helloworld
[vagrant@localhost ~]$ docker image ls
REPOSITORY           TAG       IMAGE ID       CREATED          SIZE
yujiang/helloworld   latest    b898a6498b21   21 seconds ago   857kB
View the image layers
[vagrant@localhost ~]$ docker history yujiang/helloworld
IMAGE          CREATED              CREATED BY                                      SIZE     COMMENT
b898a6498b21   About a minute ago   /bin/sh -c #(nop) CMD ["/helloworld"]           0B
2949199fbdb8   About a minute ago   /bin/sh -c #(nop) ADD file:5e0b91d4866514aa0…   857kB
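The Dockerfile itself is not reproduced in these notes; judging from the two layers above (a single ADD of the static binary plus a CMD, with no base-image layers), it would be roughly the following, written here as a shell sketch that also runs the build:

cat > Dockerfile <<'EOF'
# No base image: the image holds only the binary, which is why it must be statically linked.
FROM scratch
ADD helloworld /
CMD ["/helloworld"]
EOF
docker build -t yujiang/helloworld .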
[vagrant@localhost ~]$ ll -h
total 848K
-rw-rw-r--. 1 vagrant vagrant   50 Dec 13 16:41 Dockerfile
-rwxrwxr-x. 1 vagrant vagrant 837K Dec 13 16:47 helloworld
-rw-rw-r--. 1 vagrant vagrant   79 Dec 13 16:11 helloworld.c
[vagrant@localhost ~]$ docker run yujiang/helloworld
hello world!
[vagrant@localhost ~]$ docker run -it centos
[root@b2985a1be234 /]# yum install vim -y
[root@b2985a1be234 /]# exit
[vagrant@localhost ~]$ docker container ls -a
CONTAINER ID   IMAGE    COMMAND       CREATED         STATUS                          PORTS   NAMES
b2985a1be234   centos   "/bin/bash"   3 minutes ago   Exited (0) About a minute ago           nervous_haslett
2. Create an image with docker commit
[vagrant@localhost ~]$ docker commit b2985a1be234 yujiang/centos-vim
sha256:d0a8856e664eb754f9854c0c27a92c7d01623d2087a77269fbd12ba5021e6e13
[vagrant@localhost ~]$ docker image ls
REPOSITORY           TAG       IMAGE ID       CREATED          SIZE
yujiang/centos-vim   latest    d0a8856e664e   59 seconds ago   327MB
centos               latest    1e1148e4cc2c   7 days ago       202MB
[vagrant@localhost ~]$ docker history 1e1148e4cc2c
IMAGE          CREATED      CREATED BY                                      SIZE     COMMENT
1e1148e4cc2c   7 days ago   /bin/sh -c #(nop) CMD ["/bin/bash"]             0B
<missing>      7 days ago   /bin/sh -c #(nop) LABEL org.label-schema.sc…    0B
<missing>      7 days ago   /bin/sh -c #(nop) ADD file:6f877549795f4798a…   202MB
[vagrant@localhost ~]$ docker history d0a8856e664e
IMAGE          CREATED              CREATED BY                                      SIZE     COMMENT
d0a8856e664e   About a minute ago   /bin/bash                                       126MB
1e1148e4cc2c   7 days ago           /bin/sh -c #(nop) CMD ["/bin/bash"]             0B
<missing>      7 days ago           /bin/sh -c #(nop) LABEL org.label-schema.sc…    0B
<missing>      7 days ago           /bin/sh -c #(nop) ADD file:6f877549795f4798a…   202MB
Publishing an image this way is not safe, because other people have no way of knowing what changes were made to it. Not recommended.
Create an image with docker build
1. Create the centos-vim directory
[vagrant@localhost ~]$ mkdir centos-vim && cd centos-vim
2. Write the Dockerfile
[vagrant@localhost centos-vim]$ vim Dockerfile
FROM centos
RUN yum install vim -y
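The build step itself is missing from the transcript; it would have been along these lines (the tag is taken from the image listing that follows):

[vagrant@localhost centos-vim]$ docker build -t yujiang/centos-vim .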
[vagrant@localhost centos-vim]$ docker image ls
REPOSITORY           TAG       IMAGE ID       CREATED         SIZE
yujiang/centos-vim   latest    e00635baf672   6 minutes ago   327MB
centos               latest    1e1148e4cc2c   7 days ago      202MB
[vagrant@localhost ~]$ docker login
Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.
Username: lnsyyj
Password:
Login Succeeded
[vagrant@localhost ~]$ docker push lnsyyj/helloworld:latest
The push refers to repository [docker.io/lnsyyj/helloworld]
a9094ec14918: Pushed
latest: digest: sha256:dd740db962a1e3a8fb74461505f539248b7c88de80b133db612c22e80d7b2d17 size: 527
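The image was built earlier as yujiang/helloworld, so pushing it under the lnsyyj Docker Hub account implies a retag that is not shown in the transcript; it would look roughly like:

docker tag yujiang/helloworld lnsyyj/helloworld:latest
docker push lnsyyj/helloworld:latest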
[1] EPEL reference: https://blog.csdn.net/yasi_xi/article/details/11746255
EPEL stands for Extra Packages for Enterprise Linux. It is a project run by the Fedora community that provides high-quality packages for RHEL and derivative distributions such as CentOS and Scientific Linux. Installing EPEL is essentially adding a third-party package repository.
7. Initialize the OSD data directory
[root@cephlm ~]# ceph-osd -i $ID --mkfs --osd-uuid $UUID
2019-08-02 18:07:34.990087 7f41b6680d80 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway
2019-08-02 18:07:35.061501 7f41b6680d80 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway
2019-08-02 18:07:35.062969 7f41b6680d80 -1 journal do_read_entry(4096): bad header magic
2019-08-02 18:07:35.062993 7f41b6680d80 -1 journal do_read_entry(4096): bad header magic
2019-08-02 18:07:35.063581 7f41b6680d80 -1 read_settings error reading settings: (2) No such file or directory
2019-08-02 18:07:35.146735 7f41b6680d80 -1 created object store /var/lib/ceph/osd/ceph-0 for osd.0 fsid c8b0b137-1ba7-4c1f-a514-281139c35233
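$ID and $UUID come from earlier steps that are not part of this excerpt; in the usual manual-deployment flow they would be produced roughly like this (a sketch of typical prior steps, not the exact commands used here):

UUID=$(uuidgen)                       # fsid for the new OSD
ID=$(ceph osd create "$UUID")         # register the OSD and capture its numeric id
mkdir -p /var/lib/ceph/osd/ceph-$ID   # data directory that ceph-osd --mkfs populates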
4. Start a single-node Kubernetes cluster with minikube (https://www.jianshu.com/p/a7620f73c7f3)
yujiangdeMBP-13:~ yujiang$ minikube start
Starting local Kubernetes v1.9.4 cluster...
Starting VM...
Getting VM IP address...
Moving files into cluster...
Setting up certs...
Connecting to cluster...
Setting up kubeconfig...
Starting cluster components...
Kubectl is now configured to use the cluster.
Loading cached images from config file.
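A quick way to confirm the cluster is actually usable (extra checks, not part of the original transcript):

yujiangdeMBP-13:~ yujiang$ minikube status
yujiangdeMBP-13:~ yujiang$ kubectl get nodes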
5. Find the nginx IP
$ docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
fb08befd952b        bridge              bridge              local
2de0a434731f        host                host                local
4cd7f4c7083c        none                null                local
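The notes do not show where the 172.17.0.2 address used below comes from; since nginx is running as a Pod here, one way to get it (an added step, not from the original) is:

kubectl get pod nginx -o wide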
$ curl 172.17.0.2
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
6. Specify the Pod name to enter a Container (by default the first Container is entered). If the Pod contains multiple Containers, add the "-c" flag to specify which Container to enter.
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl exec -it nginx sh
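For a Pod with several Containers the same command takes -c; the container name below is only a placeholder:

kubectl exec -it nginx -c <container-name> sh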
7. Print detailed information about the nginx Pod
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl describe pod nginx
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get pod
NAME          READY   STATUS    RESTARTS   AGE
nginx-prlfw   1/1     Running   0          19s
nginx-szrh8   1/1     Running   0          19s
nginx-z59kd   1/1     Running   0          19s
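The ReplicationController manifest that produced these three Pods is not included in the notes; a minimal sketch consistent with the output (the labels and container spec are assumptions) would be:

cat <<'EOF' | kubectl create -f -
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx
spec:
  replicas: 3
  selector:
    app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
EOF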
# Change the number of replicas (horizontal scaling)
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl scale rc nginx --replicas=2
replicationcontroller/nginx scaled
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get pods
NAME          READY   STATUS    RESTARTS   AGE
nginx-szrh8   1/1     Running   1          21h
nginx-z59kd   1/1     Running   1          21h
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get rc
NAME    DESIRED   CURRENT   READY   AGE
nginx   2         2         2       21h
yujiangdeMBP-13:k8s_yaml yujiang$ kubectl get pods -o wide
NAME          READY   STATUS    RESTARTS   AGE   IP           NODE       NOMINATED NODE   READINESS GATES
nginx-szrh8   1/1     Running   1          21h   172.17.0.4   minikube   <none>           <none>
nginx-z59kd   1/1     Running   1          21h   172.17.0.6   minikube   <none>           <none>
vi /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=cgroupfs --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"
MariaDB [imooc]> INSERT INTO user (name,gender,age) values('zhangsan',1,21);
Query OK, 1 row affected (0.00 sec)
MariaDB [imooc]> INSERT INTO user (name,gender,age) values('lisi',0,22);
Query OK, 1 row affected (0.01 sec)
MariaDB [imooc]> INSERT INTO user (name,gender,age) values('wangwu',1,20);
Query OK, 1 row affected (0.00 sec)
MariaDB [imooc]> select * from user;
+----+----------+--------+-----+
| id | name     | gender | age |
+----+----------+--------+-----+
|  1 | zhangsan |      1 |  21 |
|  2 | lisi     |      0 |  22 |
|  3 | wangwu   |      1 |  20 |
+----+----------+--------+-----+
3 rows in set (0.00 sec)
Generate the code
yujiangdeMBP-13:imooc yujiang$ bee generate scaffold user -fields="id:int64,name:string,gender:int,age:int" -driver=mysql -conn="root:@tcp(192.168.56.101:3306)/imooc"
______
| ___ \
| |_/ /  ___   ___
| ___ \ / _ \ / _ \
| |_/ /|  __/|  __/
\____/  \___| \___| v1.10.0
2018/10/28 19:30:25 INFO     ▶ 0001 Do you want to create a 'user' model? [Yes|No] Yes
2018/10/28 19:31:34 INFO     ▶ 0002 Using 'User' as model name
2018/10/28 19:31:34 INFO     ▶ 0003 Using 'models' as package name
	create	 /Users/yujiang/go/src/imooc/models/user.go
2018/10/28 19:31:34 INFO     ▶ 0004 Do you want to create a 'user' controller? [Yes|No] Yes
2018/10/28 19:31:43 INFO     ▶ 0005 Using 'User' as controller name
2018/10/28 19:31:43 INFO     ▶ 0006 Using 'controllers' as package name
2018/10/28 19:31:43 INFO     ▶ 0007 Using matching model 'User'
	create	 /Users/yujiang/go/src/imooc/controllers/user.go
2018/10/28 19:31:43 INFO     ▶ 0008 Do you want to create views for this 'user' resource? [Yes|No] Yes
2018/10/28 19:31:52 INFO     ▶ 0009 Generating view...
	create	 /Users/yujiang/go/src/imooc/views/user/index.tpl
	create	 /Users/yujiang/go/src/imooc/views/user/show.tpl
	create	 /Users/yujiang/go/src/imooc/views/user/create.tpl
	create	 /Users/yujiang/go/src/imooc/views/user/edit.tpl
2018/10/28 19:31:52 INFO     ▶ 0010 Do you want to create a 'user' migration and schema for this resource? [Yes|No] No
2018/10/28 19:32:11 INFO     ▶ 0011 Do you want to migrate the database? [Yes|No] No
2018/10/28 19:32:13 SUCCESS  ▶ 0012 All done! Don't forget to add beego.Router("/user" ,&controllers.UserController{}) to routers/route.go