
Configure Domestic Mirrors for CentOS 8 yum/dnf

# Mount the installation image
[root@localhost /]# mkdir /media/CentOS
[root@localhost /]# mount /dev/cdrom /media/CentOS

# Edit the repo configuration files
[root@localhost /]# vi /etc/yum.repos.d/CentOS-AppStream.repo
[AppStream]
name=CentOS-$releasever - AppStream
baseurl=http://mirrors.aliyun.com/centos/$releasever/AppStream/$basearch/os/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial

[root@localhost /]# vi /etc/yum.repos.d/CentOS-Base.repo
[BaseOS]
name=CentOS-$releasever - Base
baseurl=http://mirrors.aliyun.com/centos/$releasever/BaseOS/$basearch/os/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial

[root@localhost /]# vi /etc/yum.repos.d/CentOS-Epel.repo
[epel]
name=CentOS-$releasever - Epel
baseurl=http://mirrors.aliyun.com/epel/8/Everything/$basearch
enabled=1
gpgcheck=0

[root@localhost /]# vi  /etc/yum.repos.d/CentOS-Media.repo
[c8-media-BaseOS]
name=CentOS-BaseOS-$releasever - Media
baseurl=file:///media/CentOS/BaseOS/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
[c8-media-AppStream]
name=CentOS-AppStream-$releasever - Media
baseurl=file:///media/CentOS/AppStream/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial

# Clear all cached files
[root@localhost /]# dnf clean all

# Build the metadata cache
[root@localhost yum.repos.d]# dnf makecache
CentOS-8 - AppStream                                                                                      9.2 kB/s | 1.7 MB     03:04    
CentOS-8 - Base                                                                                            22 kB/s | 1.5 MB     01:09    
CentOS-8 - Epel                                                                                            41 kB/s | 3.5 MB     01:28    
CentOS-AppStream-8 - Media                                                                                133 MB/s | 5.2 MB     00:00    
CentOS-BaseOS-8 - Media                                                                                   120 MB/s | 2.2 MB     00:00    
Docker CE Stable - x86_64                                                                                 2.1 kB/s |  21 kB     00:10    
Failed to synchronize cache for repo 'AppStream', ignoring this repo.
Failed to synchronize cache for repo 'BaseOS', ignoring this repo.
Failed to synchronize cache fo

Install Docker

yum install -y yum-utils  device-mapper-persistent-data  lvm2
yum-config-manager  --add-repo   https://download.docker.com/linux/centos/docker-ce.repo
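
# If download.docker.com is slow to reach, the repo can point at a domestic
# mirror instead -- a sketch, assuming the Aliyun docker-ce mirror path:
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo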

# Fix for the error "Problem: package docker-ce-3:19.03.4-3.el7.x86_64 requires containerd.io >= 1.2.2-3" -- install a newer containerd.io first
[root@localhost yum.repos.d]# dnf install https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm

# Install docker-ce
[root@localhost yum.repos.d]# yum install docker-ce docker-ce-cli

# Start Docker
systemctl start docker

# Disable SELinux and the firewall
[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# setenforce 0
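
Note that setenforce 0 and systemctl stop firewalld only last until the next reboot. A minimal sketch to make both changes persistent (assuming the stock /etc/selinux/config layout):

# Keep SELinux permissive and the firewall off across reboots
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
systemctl disable firewalld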

Deploy the K8S Cluster

# Set the hostname on each of the two hosts
hostnamectl set-hostname master
hostnamectl set-hostname node1

# Add a domestic yum repo for Kubernetes
[root@localhost yum.repos.d]# vi /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes Repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
enabled=1

# Install kubeadm and related tools
[root@localhost yum.repos.d]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

# Enable the services and start them at boot
[root@localhost yum.repos.d]# systemctl enable docker && systemctl start docker 
[root@localhost yum.repos.d]# systemctl enable kubelet && systemctl start kubelet

# Use kubeadm config to dump the default init parameters to a file
[root@localhost tmp]# kubeadm config print init-defaults > init.default.yaml

# Speed up Docker image pulls (add registry mirrors)
[root@localhost tmp]# vi /etc/docker/daemon.json
{
  "registry-mirrors": [
    "https://dockerhub.azk8s.cn",
    "https://reg-mirror.qiniu.com"
  ]
}
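
daemon.json is only read at startup, so the mirror list takes effect after a restart; a quick check of the active mirrors follows (docker info output layout may vary slightly by version):

# Reload and restart Docker so the registry mirrors take effect
systemctl daemon-reload
systemctl restart docker
docker info | grep -A2 'Registry Mirrors'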

# Pull the images one by one from a domestic registry
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.15-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.16.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.16.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.16.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.16.1

# Adjust the version numbers to match your environment, then re-tag the images with docker tag:
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.16.1 k8s.gcr.io/kube-apiserver:v1.16.1  
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.16.1 k8s.gcr.io/kube-controller-manager:v1.16.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.16.1 k8s.gcr.io/kube-scheduler:v1.16.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.16.1 k8s.gcr.io/kube-proxy:v1.16.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.15-0 k8s.gcr.io/etcd:3.3.15-0
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.2 k8s.gcr.io/coredns:1.6.2
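
The pull-and-retag steps can also be collapsed into a loop; a minimal sketch (the image list and versions mirror the commands above):

# Pull each image from the Aliyun mirror and retag it as k8s.gcr.io/<image>
for img in kube-apiserver:v1.16.1 kube-controller-manager:v1.16.1 \
           kube-scheduler:v1.16.1 kube-proxy:v1.16.1 \
           pause:3.1 etcd:3.3.15-0 coredns:1.6.2; do
  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$img
  docker tag  registry.cn-hangzhou.aliyuncs.com/google_containers/$img k8s.gcr.io/$img
done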

# Configure kubelet to ignore swap
[root@localhost tmp]# cat /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false"
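
kubeadm itself also objects to swap (hence the --ignore-preflight-errors=Swap flag below); instead of ignoring it, swap can be disabled outright. A sketch, assuming a standard swap entry in /etc/fstab:

# Turn swap off now and keep it off after reboot
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab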

# Run kubeadm init to install the Master
# The first attempt failed: a missing space fused --ignore-preflight-errors onto the --service-cidr value
kubeadm init --kubernetes-version=v1.16.1 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12--ignore-preflight-errors=Swap
serviceSubnet: Invalid value: "10.96.0.0/12--ignore-preflight-errors=Swap": couldn't parse subnet
To see the stack trace of this error execute with --v=5 or higher

# Retried with the space restored:
[root@localhost ~]# kubeadm init --kubernetes-version=v1.16.1 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap
[init] Using Kubernetes version: v1.16.1
[preflight] Running pre-flight checks
	[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
	[WARNING Swap]: running with swap on is not supported. Please disable swap
	[WARNING FileExisting-tc]: tc not found in system path
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.5. Latest validated version: 18.09
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [localhost.localdomain kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.31.201]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost.localdomain localhost] and IPs [192.168.31.201 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost.localdomain localhost] and IPs [192.168.31.201 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 34.501556 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.16" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node localhost.localdomain as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node localhost.localdomain as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: rowlq0.6dmbks5hyk5x767d
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.31.201:6443 --token rowlq0.6dmbks5hyk5x767d \
    --discovery-token-ca-cert-hash sha256:fddbabe7b4a8833252b467efb723973a8a5559dd8b8c1babf26322968372352b 
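
The bootstrap token above expires after 24 hours; if a node joins later, a fresh join command can be printed on the master:

# Print a new join command with a fresh token
kubeadm token create --print-join-command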

# Set the environment variable
[root@localhost ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
[root@localhost ~]#  export KUBECONFIG=/etc/kubernetes/admin.conf

# Install a CNI network plugin (Weave is used here)
$ kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
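
Once the Weave pods come up, the master node should report Ready:

# Check the weave-net pods and the node state
kubectl get pods -n kube-system | grep weave
kubectl get nodes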


# Set up the Node and join it to the cluster

# Install Docker (same steps as above)
...

# Pull the images one by one from a domestic registry
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.15-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.16.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.16.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.16.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.16.1

# Adjust the version numbers to match your environment, then re-tag the images with docker tag:
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.16.1 k8s.gcr.io/kube-apiserver:v1.16.1 
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.16.1 k8s.gcr.io/kube-controller-manager:v1.16.1 
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.16.1 k8s.gcr.io/kube-scheduler:v1.16.1 
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.16.1 k8s.gcr.io/kube-proxy:v1.16.1 
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1 
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.15-0 k8s.gcr.io/etcd:3.3.15-0 
[root@admin ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.2 k8s.gcr.io/coredns:1.6.2

# Ignore swap
vi /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false"

# Join the cluster
[root@localhost ~]# kubeadm join 192.168.31.201:6443 --token rowlq0.6dmbks5hyk5x767d     --discovery-token-ca-cert-hash sha256:fddbabe7b4a8833252b467efb723973a8a5559dd8b8c1babf26322968372352b --ignore-preflight-errors=Swap
[preflight] Running pre-flight checks
	[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
	[WARNING Swap]: running with swap on is not supported. Please disable swap
	[WARNING FileExisting-tc]: tc not found in system path
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.5. Latest validated version: 18.09
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.16" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

# Reset a host back to its pre-join state (when needed)
kubeadm reset

# List the nodes
[root@master ~]# kubectl get nodes
NAME     STATUS   ROLES    AGE     VERSION
master   Ready    master   3m14s   v1.16.3
node1    Ready    <none>   2m12s   v1.16.3

# List all pods in kube-system
[root@master ~]# kubectl get pods -n kube-system -o wide
NAME                             READY   STATUS    RESTARTS   AGE    IP               NODE     NOMINATED NODE   READINESS GATES
coredns-5644d7b6d9-8gvbh         1/1     Running   0          2m5s   10.44.0.1        master   <none>           <none>
coredns-5644d7b6d9-t9kb5         0/1     Running   0          2m5s   10.32.0.3        node1    <none>           <none>
etcd-master                      1/1     Running   0          58s    192.168.31.201   master   <none>           <none>
kube-apiserver-master            1/1     Running   0          80s    192.168.31.201   master   <none>           <none>
kube-controller-manager-master   1/1     Running   0          82s    192.168.31.201   master   <none>           <none>
kube-proxy-csldc                 1/1     Running   0          2m5s   192.168.31.201   master   <none>           <none>
kube-proxy-mgbz4                 1/1     Running   0          82s    192.168.31.202   node1    <none>           <none>
kube-scheduler-master            1/1     Running   0          74s    192.168.31.201   master   <none>           <none>
weave-net-lb5lm                  2/2     Running   0          41s    192.168.31.201   master   <none>           <none>
weave-net-n4lvd                  2/2     Running   0          41s    192.168.31.202   node1    <none>           <none>

Introduction to Kubernetes

 Kubernetes is a leading distributed-architecture solution built on container technology, descended from Google's internal Borg system. Its goals are to automate resource management and to maximize resource utilization across multiple data centers.
 Moreover, if a system follows the Kubernetes design philosophy, the low-level plumbing of a traditional architecture that has little to do with business logic (load balancing, deployment, service governance, failure handling, and so on) no longer needs attention. In short, the solutions K8S provides can save at least 30% of development cost, and it ships with powerful operations machinery as well.

Service

 In Kubernetes, the Service is the core of the distributed cluster architecture. A Service object has the following key characteristics (see the DNS lookup sketch after this list):

  • Has a unique assigned name (e.g. mysql-server).
  • Has a virtual IP (ClusterIP, Service IP, or VIP) and a port number.
  • Provides some kind of remote service capability.
  • Is mapped onto the group of container applications that provide this capability.
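
Because every Service has a stable name, other Pods reach it by name through the cluster DNS rather than by chasing Pod IPs. A minimal lookup sketch (the throwaway Pod dns-test is hypothetical; busybox:1.28 is a common choice for nslookup tests):

# Resolve the mysql Service name from inside the cluster
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup mysql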

Pod

 A Pod runs in an environment called a Node, which can be a physical machine or a virtual machine in a private or public cloud; several Pods usually run on a single Node. Inside every Pod runs a special container called Pause; the other containers are business containers. The business containers share the Pause container's network stack and Volume mounts, so communication and data exchange between them are highly efficient. At design time we can exploit this by putting a group of closely related service processes into the same Pod.
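
A minimal sketch of that sharing (the Pod name, the shared-logs volume, and the images are illustrative, not from the original example):

apiVersion: v1
kind: Pod
metadata:
  name: two-containers        # hypothetical Pod for illustration
spec:
  volumes:
  - name: shared-logs         # emptyDir volume shared by both containers
    emptyDir: {}
  containers:
  - name: app                 # writes logs into the shared volume
    image: busybox
    command: ["sh", "-c", "while true; do date >> /logs/app.log; sleep 5; done"]
    volumeMounts:
    - name: shared-logs
      mountPath: /logs
  - name: log-reader          # shares the network stack and volume, so it sees the same files
    image: busybox
    command: ["sh", "-c", "tail -f /logs/app.log"]
    volumeMounts:
    - name: shared-logs
      mountPath: /logs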

Master and Node

 For cluster management, the machines in a cluster are divided into one Master and a number of Nodes (a quick health check follows this list):

  • Master: runs the cluster-management processes kube-apiserver, kube-controller-manager, and kube-scheduler. Together they implement resource management, Pod scheduling, elastic scaling, security control, monitoring, and error correction for the whole cluster, all fully automatically.
  • Node: a worker node that runs the real application workloads; the smallest unit Kubernetes manages on a Node is the Pod. Each Node runs the kubelet and kube-proxy service processes, which handle creating, starting, monitoring, restarting, and destroying Pods, as well as implementing a software-mode load balancer.
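
On this cluster the Master components run as static Pods on the master node, so their health can be checked directly (the componentstatuses resource is still served in v1.16, though it was deprecated in later releases):

# Inspect the control-plane components and the system Pods
kubectl get componentstatuses
kubectl get pods -n kube-system -o wide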

K8S Neatly Solves Service Scaling and Service Upgrades

 In a traditional IT system, scaling a service out is done through step-by-step manual work (deploying and starting each instance), which is slow, laborious, and hard to do reliably. In K8S, by contrast, you only need to create an RC (Replication Controller) for the Pods behind the Service that needs scaling, and the headaches of scaling and even upgrading the service dissolve. An RC definition file contains the following three key pieces of information:

  • The definition of the target Pod.
  • The number of replicas of the target Pod that should run (Replicas).
  • The labels of the target Pod to monitor.

 Once the RC is created (the system automatically creates the Pods), Kubernetes uses the Labels defined in the RC to select the matching Pod instances and continuously monitors their state and count. If the instance count falls below the declared replica count, a new Pod is created from the Pod template in the RC and scheduled onto a suitable Node, until the number of Pod instances reaches the target. This process is fully automatic and needs no human intervention. With an RC, scaling becomes a pure numbers game: just change the replica count in the RC, as the sketch below shows. Subsequent service upgrades are likewise performed automatically by modifying the RC.
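
A minimal sketch of that numbers game, run against the mysql RC created later in this article:

# Scale the mysql RC to three replicas; Kubernetes converges automatically
kubectl scale rc mysql --replicas=3
kubectl get pods -l app=mysql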

Why Use Kubernetes?

  • Docker containerization has already been adopted by many companies, and moving from single machines to clusters is inevitable; K8S is the Docker-based solution for large-scale containerized distributed systems.
  • It fully embraces the microservices architecture.
  • It dovetails with public clouds (the whole system can be migrated onto a public cloud at any time).
  • K8S's built-in elastic scaling easily absorbs traffic bursts.
  • The K8S architecture has formidable horizontal scale-out capacity.

Getting-Started Example (Tomcat + MySQL)

MySQL RC file
 1.3.2_mysql-rc.yaml

apiVersion: v1
kind: ReplicationController	# Replication Controller (RC)
metadata:
  name: mysql	# Name of the RC, globally unique
spec:
  replicas: 1	# Desired number of Pod replicas
  selector:
    app: mysql	# Pods carrying this label are the targets
  template:	# Pod replicas (instances) are created from this template
    metadata:
      labels:
        app: mysql	# Label on each Pod replica, matching the RC selector
    spec:
      containers:		# Container definitions inside the Pod
      - name: mysql	# Container name
        image: mysql	# Docker image for the container
        ports:
        - containerPort: 3306		# Port the containerized app listens on
        env:				# Environment variables injected into the container
        - name: MYSQL_ROOT_PASSWORD
          value: "123456"

Publish the RC file

[root@master chapter1.32]# kubectl create -f 1.3.2_mysql-rc.yaml   
replicationcontroller/mysql created  
 
# List all RCs
[root@master chapter1.32]# kubectl get rc  
NAME    DESIRED   CURRENT   READY   AGE  
mysql   1         1         1       51m

# Switch to node1 (pause is the root container)
[root@node1 ~]# docker ps
CONTAINER ID        IMAGE                  COMMAND                  CREATED             STATUS              PORTS               NAMES
da14ecb8b708        mysql                  "docker-entrypoint.s…"   26 minutes ago      Up 26 minutes                           k8s_mysql_mysql-bbp6s_default_0d75a7b8-9784-44e8-b8cf-1cb1f93b342f_0
b833a7bab57b        k8s.gcr.io/pause:3.1   "/pause"                 About an hour ago   Up About an hour                        k8s_POD_mysql-bbp6s_default_0d75a7b8-9784-44e8-b8cf-1cb1f93b342f_0

# Check that the Pod was created
[root@master chapter1.32]# kubectl get pods
NAME          READY   STATUS    RESTARTS   AGE
mysql-bbp6s   1/1     Running   0          67m

Kubernetes Service file

apiVersion: v1
kind: Service	# Declares this file as a Kubernetes Service
metadata:
  name: mysql	# Globally unique name of the Service
spec:
  ports:
    - port: 3306	# Port the Service listens on
  selector:		# Pods carrying this label back the Service
    app: mysql

Create the MySQL Service

[root@master chapter1.32]# kubectl create -f 1.3.2_mysql-svc.yaml 
service/mysql created

# View the Service just created
[root@master chapter1.32]# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP    12h
mysql        ClusterIP   10.110.58.188   <none>        3306/TCP   39s

Tomcat RC file

apiVersion: v1
kind: ReplicationController
metadata:
  name: myweb
spec:
  replicas: 5	# five Tomcat replicas
  selector:
    app: myweb
  template:
    metadata:
      labels:
        app: myweb
    spec:
      containers:
      - name: myweb
        image: kubeguide/tomcat-app:v1
        ports:
        - containerPort: 8080
        env:	# the app locates MySQL through the Service created above
        - name: MYSQL_SERVICE_HOST
          value: 'mysql'	# resolves to the mysql Service via cluster DNS
        - name: MYSQL_SERVICE_PORT
          value: '3306'

Publish the RC file

[root@master chapter1.32]# kubectl create -f 1.3.3_myweb-rc.yaml 
replicationcontroller/myweb created

[root@master chapter1.32]# kubectl get pods
NAME          READY   STATUS    RESTARTS   AGE
mysql-bbp6s   1/1     Running   0          108m
myweb-b26c5   1/1     Running   0          4m19s
myweb-jkv6m   1/1     Running   0          4m19s
myweb-qsvb5   1/1     Running   0          4m19s
myweb-rddww   1/1     Running   0          4m19s
myweb-w4wts   1/1     Running   0          4m19s

Tomcat Service file

apiVersion: v1
kind: Service
metadata:
  name: myweb
spec:
  type: NodePort	# expose the Service on a port of every Node
  ports:
    - port: 8080
      nodePort: 30001	# external port opened on each Node
  selector:
    app: myweb

Create the Tomcat Service

[root@master chapter1.32]# kubectl create -f 1.3.3_myweb-svc.yaml 
service/myweb created

[root@master chapter1.32]# kubectl get services
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP          12h
mysql        ClusterIP   10.110.58.188    <none>        3306/TCP         31m
myweb        NodePort    10.108.175.119   <none>        8080:30001/TCP   3
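
Tomcat should now answer on port 30001 of every node; for example, using the node IPs from the cluster built above:

# Hit the NodePort from outside the cluster
curl http://192.168.31.201:30001
curl http://192.168.31.202:30001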

Author: Soulboy