K8S

Configuring China Mirror Sources for yum/dnf on CentOS 8

# Mount the installation ISO
[root@localhost /]# mkdir /media/CentOS
[root@localhost /]# mount /dev/cdrom /media/CentOS

# Edit the repo configuration files
[root@localhost /]# vi /etc/yum.repos.d/CentOS-AppStream.repo
[AppStream]
name=CentOS-$releasever - AppStream
baseurl=http://mirrors.aliyun.com/centos/$releasever/AppStream/$basearch/os/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial

[root@localhost /]# vi /etc/yum.repos.d/CentOS-Base.repo
[BaseOS]
name=CentOS-$releasever - Base
baseurl=http://mirrors.aliyun.com/centos/$releasever/BaseOS/$basearch/os/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial

[root@localhost /]# vi /etc/yum.repos.d/CentOS-Epel.repo
[epel]
name=CentOS-$releasever - Epel
baseurl=http://mirrors.aliyun.com/epel/8/Everything/$basearch
enabled=1
gpgcheck=0

[root@localhost /]# vi /etc/yum.repos.d/CentOS-Media.repo
[c8-media-BaseOS]
name=CentOS-BaseOS-$releasever - Media
baseurl=file:///media/CentOS/BaseOS/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial

[c8-media-AppStream]
name=CentOS-AppStream-$releasever - Media
baseurl=file:///media/CentOS/AppStream/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial

# Clear all cached files
[root@localhost /]# dnf clean all

# Build the metadata cache
[root@localhost yum.repos.d]# dnf makecache
CentOS-8 - AppStream           9.2 kB/s | 1.7 MB     03:04
CentOS-8 - Base                 22 kB/s | 1.5 MB     01:09
CentOS-8 - Epel                 41 kB/s | 3.5 MB     01:28
CentOS-AppStream-8 - Media     133 MB/s | 5.2 MB     00:00
CentOS-BaseOS-8 - Media        120 MB/s | 2.2 MB     00:00
Docker CE Stable - x86_64      2.1 kB/s |  21 kB     00:10
Failed to synchronize cache for repo 'AppStream', ignoring this repo.
Failed to synchronize cache for repo 'BaseOS', ignoring this repo.
Failed to synchronize cache fo

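With the repo files in place, it can be worth confirming that dnf actually sees the new repos before installing anything. A quick optional check (not part of the original steps):

# List the enabled repos; the aliyun and media repos should all appear
[root@localhost /]# dnf repolist
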
Installing Docker

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

# Fix for the error "Problem: package docker-ce-3:19.03.4-3.el7.x86_64 requires containerd.io >= 1.2.2-3":
# install a newer containerd.io first
[root@localhost yum.repos.d]# dnf install https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm

# Install docker-ce
[root@localhost yum.repos.d]# yum install docker-ce docker-ce-cli

# Start Docker
systemctl start docker

# Disable SELinux and the firewall
[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# setenforce 0

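Before moving on, a quick sanity check that the Docker daemon really is up (my own verification step, not in the original notes):

# The service should report active (running)
[root@localhost ~]# systemctl status docker
# Client and server versions should both be printed
[root@localhost ~]# docker version
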
Deploying the K8s Cluster

# Set the hostname on each of the two hosts
hostnamectl set-hostname master
hostnamectl set-hostname node1

# Switch to a China yum mirror for Kubernetes
[root@localhost yum.repos.d]# vi /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes Repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
enabled=1

# Install kubeadm and related tools
[root@localhost yum.repos.d]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

# Start the services and enable them at boot
[root@localhost yum.repos.d]# systemctl enable docker && systemctl start docker
[root@localhost yum.repos.d]# systemctl enable kubelet && systemctl start kubelet

# Use kubeadm config to dump the default initialization parameters to a file
[root@localhost tmp]# kubeadm config print init-defaults > init.default.yaml

# Speed up Docker image pulls (add registry mirrors; restart the Docker daemon afterwards for this to take effect)
[root@localhost tmp]# vi /etc/docker/daemon.json
{
  "registry-mirrors": [
    "https://dockerhub.azk8s.cn",
    "https://reg-mirror.qiniu.com"
  ]
}

# Pull the images one by one from a domestic registry
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.15-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.16.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.16.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.16.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.16.1

# Adjust the version numbers to match your setup, then retag the images with docker tag:
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.16.1 k8s.gcr.io/kube-apiserver:v1.16.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.16.1 k8s.gcr.io/kube-controller-manager:v1.16.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.16.1 k8s.gcr.io/kube-scheduler:v1.16.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.16.1 k8s.gcr.io/kube-proxy:v1.16.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.15-0 k8s.gcr.io/etcd:3.3.15-0
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.2 k8s.gcr.io/coredns:1.6.2

# Tell the kubelet to ignore swap
[root@localhost tmp]# cat /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false"

# Run kubeadm init to install the Master
# (first attempt fails: a space is missing between the --service-cidr value and --ignore-preflight-errors)
kubeadm init --kubernetes-version=v1.16.1 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12--ignore-preflight-errors=Swap
serviceSubnet: Invalid value: "10.96.0.0/12--ignore-preflight-errors=Swap": couldn't parse subnet
To see the stack trace of this error execute with --v=5 or higher
[root@localhost ~]# kubeadm init --kubernetes-version=v1.16.1 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap
[init] Using Kubernetes version: v1.16.1
[preflight] Running pre-flight checks
	[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
	[WARNING Swap]: running with swap on is not supported. Please disable swap
	[WARNING FileExisting-tc]: tc not found in system path
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.5. Latest validated version: 18.09
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [localhost.localdomain kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.31.201]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost.localdomain localhost] and IPs [192.168.31.201 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost.localdomain localhost] and IPs [192.168.31.201 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 34.501556 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.16" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node localhost.localdomain as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node localhost.localdomain as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: rowlq0.6dmbks5hyk5x767d
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.31.201:6443 --token rowlq0.6dmbks5hyk5x767d \
    --discovery-token-ca-cert-hash sha256:fddbabe7b4a8833252b467efb723973a8a5559dd8b8c1babf26322968372352b

# Set the environment variable
[root@localhost ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
[root@localhost ~]# export KUBECONFIG=/etc/kubernetes/admin.conf

# Install a CNI network plugin (Weave Net chosen here)
$ kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"


# Set up the Node and join it to the cluster

# Install docker
...

# Pull the images one by one from a domestic registry
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.15-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.16.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.16.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.16.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.16.1

# Adjust the version numbers to match your setup, then retag the images with docker tag:
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.16.1 k8s.gcr.io/kube-apiserver:v1.16.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.16.1 k8s.gcr.io/kube-controller-manager:v1.16.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.16.1 k8s.gcr.io/kube-scheduler:v1.16.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.16.1 k8s.gcr.io/kube-proxy:v1.16.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.15-0 k8s.gcr.io/etcd:3.3.15-0
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.2 k8s.gcr.io/coredns:1.6.2

# Ignore swap
vi /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false"

# Join the cluster
[root@localhost ~]# kubeadm join 192.168.31.201:6443 --token rowlq0.6dmbks5hyk5x767d --discovery-token-ca-cert-hash sha256:fddbabe7b4a8833252b467efb723973a8a5559dd8b8c1babf26322968372352b --ignore-preflight-errors=Swap
[preflight] Running pre-flight checks
	[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
	[WARNING Swap]: running with swap on is not supported. Please disable swap
	[WARNING FileExisting-tc]: tc not found in system path
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.5. Latest validated version: 18.09
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.16" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
# Restore a host to its original state (undo the kubeadm setup) if needed
kubeadm reset

# Check the nodes
[root@master ~]# kubectl get nodes
NAME     STATUS   ROLES    AGE     VERSION
master   Ready    master   3m14s   v1.16.3
node1    Ready    <none>   2m12s   v1.16.3

# List all pods in kube-system
[root@master ~]# kubectl get pods -n kube-system -o wide
NAME                             READY   STATUS    RESTARTS   AGE    IP               NODE     NOMINATED NODE   READINESS GATES
coredns-5644d7b6d9-8gvbh         1/1     Running   0          2m5s   10.44.0.1        master   <none>           <none>
coredns-5644d7b6d9-t9kb5         0/1     Running   0          2m5s   10.32.0.3        node1    <none>           <none>
etcd-master                      1/1     Running   0          58s    192.168.31.201   master   <none>           <none>
kube-apiserver-master            1/1     Running   0          80s    192.168.31.201   master   <none>           <none>
kube-controller-manager-master   1/1     Running   0          82s    192.168.31.201   master   <none>           <none>
kube-proxy-csldc                 1/1     Running   0          2m5s   192.168.31.201   master   <none>           <none>
kube-proxy-mgbz4                 1/1     Running   0          82s    192.168.31.202   node1    <none>           <none>
kube-scheduler-master            1/1     Running   0          74s    192.168.31.201   master   <none>           <none>
weave-net-lb5lm                  2/2     Running   0          41s    192.168.31.201   master   <none>           <none>
weave-net-n4lvd                  2/2     Running   0          41s    192.168.31.202   node1    <none>           <none>

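One side note on joining nodes later: the bootstrap token printed by kubeadm init expires after 24 hours by default. If it has expired by the time another node joins, a fresh join command can be generated on the Master (not part of the original transcript):

# Prints a complete kubeadm join command with a newly created token
[root@master ~]# kubeadm token create --print-join-command
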
Introduction to Kubernetes

 Kubernetes is a brand-new, leading distributed-architecture solution built on container technology, and it originates from Google's Borg. Its goals are to automate resource management and to maximize resource utilization across multiple data centers.
 Moreover, if a system follows the Kubernetes design philosophy, then the low-level code and plumbing modules of a traditional architecture that have little to do with the business (load balancing, deployment, service governance, failure handling, and so on) no longer need any attention. In short, the solution K8S provides can save no less than 30% of development cost, and it ships with powerful operations machinery on top of that.

Service

 In Kubernetes, the Service is the core of the distributed cluster architecture. A Service object has the following key characteristics.

  • It has a unique, designated name (for example, mysql-server).
  • It has a virtual IP (ClusterIP, Service IP, or VIP) and a port number.
  • It provides some kind of remote service capability.
  • It is mapped onto the group of container applications that provide this service capability.

Pod

 A Pod runs in an environment called a Node, which can be a physical machine or a virtual machine in a private or public cloud; typically several Pods run on a single Node. Inside every Pod runs a special container called Pause, while the remaining containers are business containers. The business containers share the Pause container's network stack and Volume mounts, which makes communication and data exchange between them especially efficient. When designing a system, we can exploit this property by placing a group of closely related service processes into the same Pod, as sketched below.
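
To make the shared-network point concrete, here is a minimal hand-written Pod sketch (the Pod name and images are illustrative, not from the original): both containers sit behind the same Pause container, so the sidecar reaches the web server simply via localhost.

apiVersion: v1
kind: Pod
metadata:
  name: two-containers        # hypothetical example Pod
spec:
  containers:
  - name: web                 # main business container
    image: nginx
    ports:
    - containerPort: 80
  - name: sidecar             # second business container in the same Pod
    image: busybox
    # localhost resolves inside the shared network namespace, so this
    # polls the nginx container directly, with no Service in between
    command: ["sh", "-c", "while true; do wget -q -O- http://localhost:80 > /dev/null; sleep 5; done"]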

Master and Node

 For cluster management, the machines in a cluster are divided into one Master and a number of Nodes.

  • Master: runs the cluster-management process group kube-apiserver, kube-controller-manager, and kube-scheduler. These processes implement resource management, Pod scheduling, elastic scaling, security control, system monitoring, and error correction for the entire cluster, all fully automatically.
  • Node: a worker node in the cluster that runs the actual applications; the smallest unit Kubernetes manages on a Node is the Pod. Each Node runs Kubernetes' kubelet and kube-proxy service processes, which are responsible for creating, starting, monitoring, restarting, and destroying Pods, as well as implementing the software-mode load balancer.
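
On a v1.16 cluster, the health of these Master processes can be checked directly from kubectl (an optional check, not in the original notes); on a healthy cluster the scheduler, controller-manager, and etcd should all report Healthy:

# componentstatuses (short name: cs) summarizes control-plane health
[root@master ~]# kubectl get componentstatuses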

How K8S Neatly Solves Service Scaling and Service Upgrades

 In a traditional IT system, scaling a service out is handled through step-by-step manual work (deploying and starting each instance), which is time-consuming, labor-intensive, and hard to execute reliably. In K8S, by contrast, you only need to create an RC (Replication Controller) for the Pods behind the Service that needs to scale, and the headaches of service scaling and even service upgrades are solved. An RC definition file contains the following 3 key pieces of information.

  • The definition of the target Pod.
  • The number of replicas the target Pod should run (Replicas).
  • The labels of the target Pods to monitor.

 Once the RC is created (the system automatically creates the Pods), Kubernetes uses the Labels defined in the RC to filter out the matching Pod instances and continuously monitors their status and count. If the instance count falls below the defined replica count, a new Pod is created from the Pod template in the RC and scheduled onto a suitable Node, until the number of Pod instances reaches the target. The process is fully automated, with no human intervention. With an RC in place, service scaling becomes a purely simple numbers game: just change the replica count in the RC. Subsequent service upgrades are likewise carried out automatically by modifying the RC, as the sketch below shows.
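
In practice that numbers game is a single command. A sketch using the mysql RC created later in this post (assuming it already exists):

# Raise the replica count; the RC controller creates Pods until 3 are running
[root@master ~]# kubectl scale rc mysql --replicas=3
# Lower it again; the excess Pods are terminated automatically
[root@master ~]# kubectl scale rc mysql --replicas=1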

Why Use Kubernetes?

  • Docker containerization has already been adopted by many companies, and the move from single machines to clusters is inevitable. K8S is the large-scale containerized distributed-system solution built on Docker.
  • It fully embraces the microservices architecture.
  • It connects seamlessly with public clouds (the entire system can be migrated to a public cloud at any time).
  • K8S's built-in elastic scaling mechanism makes it easy to absorb traffic bursts.
  • The K8S system architecture has exceptionally strong horizontal scaling capability.

Getting-Started Example (Tomcat + MySQL)

MySQL RC File
 1.3.2_mysql-rc.yaml

apiVersion: v1
kind: ReplicationController    # Replication Controller (RC)
metadata:
  name: mysql                  # Name of the RC, unique across the cluster
spec:
  replicas: 1                  # Desired number of Pod replicas
  selector:
    app: mysql                 # Target Pods carry this label
  template:                    # Template used to create the Pod replicas (instances)
    metadata:
      labels:
        app: mysql             # Labels on the Pod replicas; must match the RC's selector
    spec:
      containers:              # Container definitions inside the Pod
      - name: mysql            # Container name
        image: mysql           # Docker image for the container
        ports:
        - containerPort: 3306  # Port the containerized app listens on
        env:                   # Environment variables injected into the container
        - name: MYSQL_ROOT_PASSWORD
          value: "123456"

Publishing the RC File

[root@master chapter1.32]# kubectl create -f 1.3.2_mysql-rc.yaml
replicationcontroller/mysql created

# List all RCs
[root@master chapter1.32]# kubectl get rc
NAME    DESIRED   CURRENT   READY   AGE
mysql   1         1         1       51m

# Switch to node1 (pause is the root container)
[root@node1 ~]# docker ps
CONTAINER ID        IMAGE                  COMMAND                  CREATED             STATUS              PORTS               NAMES
da14ecb8b708        mysql                  "docker-entrypoint.s…"   26 minutes ago      Up 26 minutes                           k8s_mysql_mysql-bbp6s_default_0d75a7b8-9784-44e8-b8cf-1cb1f93b342f_0
b833a7bab57b        k8s.gcr.io/pause:3.1   "/pause"                 About an hour ago   Up About an hour                        k8s_POD_mysql-bbp6s_default_0d75a7b8-9784-44e8-b8cf-1cb1f93b342f_0

# Check Pod creation
[root@master chapter1.32]# kubectl get pods
NAME          READY   STATUS    RESTARTS   AGE
mysql-bbp6s   1/1     Running   0          67m
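
The RC's monitoring loop described earlier is easy to watch in action: delete the Pod and the RC immediately creates a replacement to keep the count at 1 (a small experiment of my own; the Pod name suffix will differ on your cluster):

# Kill the Pod that the mysql RC manages
[root@master chapter1.32]# kubectl delete pod mysql-bbp6s
# A new mysql-xxxxx Pod shows up almost immediately
[root@master chapter1.32]# kubectl get pods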

Kubernetes Service File

apiVersion: v1
kind: Service        # Declares this as a Kubernetes Service
metadata:
  name: mysql        # Globally unique name of the Service
spec:
  ports:
    - port: 3306     # Port the Service serves on
  selector:          # The Pods backing this Service carry the label defined here
    app: mysql

Creating the MySQL Service

[root@master chapter1.32]# kubectl create -f 1.3.2_mysql-svc.yaml
service/mysql created

# Inspect the Service we just created
[root@master chapter1.32]# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP    12h
mysql        ClusterIP   10.110.58.188   <none>        3306/TCP   39s
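
Other Pods can now reach MySQL through the Service name alone. A quick way to verify in-cluster DNS with a throwaway busybox Pod (not part of the original transcript; busybox:1.28 is chosen because its nslookup is known to behave well):

# The name "mysql" should resolve to the ClusterIP shown above (10.110.58.188)
[root@master ~]# kubectl run -it --rm --restart=Never dns-test --image=busybox:1.28 -- nslookup mysql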

Tomcat RC File

apiVersion: v1
kind: ReplicationController
metadata:
  name: myweb
spec:
  replicas: 5
  selector:
    app: myweb
  template:
    metadata:
      labels:
        app: myweb
    spec:
      containers:
      - name: myweb
        image: kubeguide/tomcat-app:v1
        ports:
        - containerPort: 8080
        env:
        - name: MYSQL_SERVICE_HOST
          value: 'mysql'
        - name: MYSQL_SERVICE_PORT
          value: '3306'

Publishing the RC File

[root@master chapter1.32]# kubectl create -f 1.3.3_myweb-rc.yaml
replicationcontroller/myweb created

[root@master chapter1.32]# kubectl get pods
NAME          READY   STATUS    RESTARTS   AGE
mysql-bbp6s   1/1     Running   0          108m
myweb-b26c5   1/1     Running   0          4m19s
myweb-jkv6m   1/1     Running   0          4m19s
myweb-qsvb5   1/1     Running   0          4m19s
myweb-rddww   1/1     Running   0          4m19s
myweb-w4wts   1/1     Running   0          4m19s

Tomcat Service File

apiVersion: v1
kind: Service
metadata:
  name: myweb
spec:
  type: NodePort
  ports:
    - port: 8080
      nodePort: 30001
  selector:
    app: myweb

Creating the Tomcat Service

[root@master chapter1.32]# kubectl create -f 1.3.3_myweb-svc.yaml
service/myweb created

[root@master chapter1.32]# kubectl get services
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP          12h
mysql        ClusterIP   10.110.58.188    <none>        3306/TCP         31m
myweb        NodePort    10.108.175.119   <none>        8080:30001/TCP   3
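
With the NodePort Service in place, the Tomcat application is reachable on port 30001 of either node, for example from a browser at http://192.168.31.201:30001, or with curl (a verification step of my own, using the node IPs from earlier in this post):

# kube-proxy forwards the NodePort to one of the myweb Pods on any node
[root@master chapter1.32]# curl -I http://192.168.31.201:30001
[root@master chapter1.32]# curl -I http://192.168.31.202:30001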

Author: Soulboy