[toc]

Installation Preparation

Environment Preparation

| Hostname | Public IP | Private IP | Role | Applications | Components |
| --- | --- | --- | --- | --- | --- |
| k8s01 | 10.0.0.11 | 172.16.1.11 | K8S Master | docker-19.03.15, kubeadm-1.19.3 | apiServer, etcd, Scheduler, Controller, kubelet, proxy, docker |
| k8s02 | 10.0.0.12 | 172.16.1.12 | K8S Node | docker-19.03.15 | kubelet, proxy, docker |
| k8s03 | 10.0.0.13 | 172.16.1.13 | K8S Node | docker-19.03.15 | kubelet, proxy, docker |

IP Planning

Three kinds of IPs used by the cluster:

| Type | Range |
| --- | --- |
| Pod IP | 10.2.0.0/16 |
| Cluster IP | 10.1.0.0/16 |
| NodePort IP (host IP) | 10.0.0.0/24 |

K8S Installation Methods

  • Binary installation
  • ansible
  • kubeadm (production best practice)
  • Rancher (graphical k8s management UI)
  • Cloud services: ACK, EKS

Installing k8s

Environment preparation (master & node)

  1. Disable swap

    # Tell kubelet to tolerate swap and to use the systemd cgroup driver
    cat >/etc/sysconfig/kubelet <<EOF
    KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
    KUBELET_EXTRA_ARGS="--fail-swap-on=false"
    EOF

    # Remove the swap entry from /etc/fstab
    sed -i '/swap/d' /etc/fstab

    # Turn off swap
    swapoff -a

    # Confirm that swap is now 0
    free -m
                  total        used        free      shared  buff/cache   available
    Mem:            972          91         735           7         145         722
    Swap:             0           0           0
  2. Enable bridged iptables filtering and IP forwarding in the kernel

    # Enable bridged iptables filtering and IP forwarding in the kernel
    cat > /etc/sysctl.d/k8s.conf << EOF
    net.bridge.bridge-nf-call-ip6tables=1
    net.bridge.bridge-nf-call-iptables=1
    net.ipv4.ip_forward=1
    EOF
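
    # To make these settings take effect right away, you may also need to load
    # br_netfilter and reload sysctl (a small extra step beyond the file above):
    modprobe br_netfilter
    sysctl --system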
  3. Time synchronization

    # Install the time-sync tool
    yum install -y chrony

    # Start the service
    systemctl start chronyd
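
    # Optionally enable chronyd at boot and verify the time sources
    systemctl enable chronyd
    chronyc sources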
  4. Load the ipvs kernel modules

    # Load the ipvs modules (kernel settings used by kube-proxy for traffic forwarding)
    cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    #! /bin/bash
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF

    # Make the ipvs script executable
    chmod +x /etc/sysconfig/modules/ipvs.modules

    # Run the script
    source /etc/sysconfig/modules/ipvs.modules

    # Check that the modules loaded successfully
    lsmod|grep -e 'ip_vs' -e 'nf_conntrack_ipv'
    nf_conntrack_ipv4 15053 0
    nf_defrag_ipv4 12729 1 nf_conntrack_ipv4
    ip_vs_sh 12688 0
    ip_vs_wrr 12697 0
    ip_vs_rr 12600 0
    ip_vs 145497 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
    nf_conntrack 133095 2 ip_vs,nf_conntrack_ipv4
    libcrc32c 12644 3 xfs,ip_vs,nf_conntrack
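
    # Optional: to be sure the same modules come back after a reboot, a minimal
    # sketch using systemd's modules-load.d (the script above may not be re-run
    # automatically on every distro):
    cat > /etc/modules-load.d/ipvs.conf <<EOF
    ip_vs
    ip_vs_rr
    ip_vs_wrr
    ip_vs_sh
    nf_conntrack_ipv4
    EOF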
  5. Install docker

    # Download the official docker repo file
    wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo

    # Switch the repo to the Tsinghua mirror
    sed -i 's+https://download.docker.com+https://mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo

    # List the available docker-ce versions
    yum list docker-ce --showduplicates

    # Install docker-ce-19.03.15
    yum install -y docker-ce-19.03.15 docker-ce-cli-19.03.15

    # Write the docker daemon config (registry mirror + systemd cgroup driver)
    mkdir -p /etc/docker
    cat > /etc/docker/daemon.json <<-'EOF'
    {
      "registry-mirrors": ["https://pgz00k39.mirror.aliyuncs.com"],
      "exec-opts": ["native.cgroupdriver=systemd"]
    }
    EOF

    # Start docker
    systemctl start docker
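
    # Optional: enable docker at boot and confirm the systemd cgroup driver is in use
    systemctl enable docker
    docker info 2>/dev/null | grep -i 'cgroup driver'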

Installing kubeadm (master)

  1. Add the kubeadm yum repo

    # Add the kubeadm yum repo (Aliyun mirror)
    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=1
    repo_gpgcheck=1
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
  2. Install and start the K8S components

    # Install the k8s components
    yum install kubelet-1.19.3 kubeadm-1.19.3 kubectl-1.19.3 ipvsadm -y

    # Enable kubelet at boot and start it
    systemctl enable kubelet
    systemctl start kubelet
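
    # Optional sanity check: confirm the installed versions really are 1.19.3
    kubeadm version -o short
    kubelet --version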
  3. Initialize the master

    # Initialize the master node (run only on the master: 10.0.0.11)
    kubeadm init \
    --apiserver-advertise-address=10.0.0.11 \
    --image-repository registry.aliyuncs.com/google_containers \
    --kubernetes-version=v1.19.3 \
    --service-cidr=10.1.0.0/16 \
    --pod-network-cidr=10.2.0.0/16 \
    --service-dns-domain=cluster.local \
    --ignore-preflight-errors=Swap \
    --ignore-preflight-errors=NumCPU

    ## Save the following output once the command finishes
    Your Kubernetes control-plane has initialized successfully!

    To start using your cluster, you need to run the following as a regular user:

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config

    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
    https://kubernetes.io/docs/concepts/cluster-administration/addons/

    Then you can join any number of worker nodes by running the following on each as root:

    kubeadm join 10.0.0.11:6443 --token 3wpap7.23excjhcqoniyc89 \
    --discovery-token-ca-cert-hash sha256:b2a68af8b03d6a3e89cd9b5f97b506ff6c3055d7689ec58686713962d4590f0b

    ## Command to reset the node if the init needs to be redone
    kubeadm reset
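
    ## Optional sketch: the same init can also be driven by a config file; the
    ## field names below assume the v1beta2 kubeadm API shipped with kubeadm 1.19
    cat > kubeadm-config.yaml <<EOF
    apiVersion: kubeadm.k8s.io/v1beta2
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 10.0.0.11
    ---
    apiVersion: kubeadm.k8s.io/v1beta2
    kind: ClusterConfiguration
    kubernetesVersion: v1.19.3
    imageRepository: registry.aliyuncs.com/google_containers
    networking:
      serviceSubnet: 10.1.0.0/16
      podSubnet: 10.2.0.0/16
      dnsDomain: cluster.local
    EOF
    kubeadm init --config kubeadm-config.yaml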
  4. Deploy the kubeconfig

    # Create the kubectl config directory
    mkdir -p $HOME/.kube

    # Copy the admin kubeconfig
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

    # Give the current user ownership of the kubeconfig
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
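
    # Alternatively, as root you can point kubectl at the admin config directly
    export KUBECONFIG=/etc/kubernetes/admin.conf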

Joining nodes to the cluster (Node)

# Run on each of the other nodes to join them to the cluster
kubeadm join 10.0.0.11:6443 --token 3wpap7.23excjhcqoniyc89 \
--discovery-token-ca-cert-hash sha256:b2a68af8b03d6a3e89cd9b5f97b506ff6c3055d7689ec58686713962d4590f0b

## Note: if you lost the join command above, run the following on the master to print a fresh one (bootstrap tokens expire after 24 hours by default)
kubeadm token create --print-join-command
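
## The tokens that still exist can be listed with:
kubeadm token list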

Configuring the kubernetes network

  1. Switch kube-proxy to ipvs mode

    # On the master, list all cluster nodes
    kubectl get nodes
    NAME STATUS ROLES AGE VERSION
    k8s01 NotReady master 34m v1.19.3
    k8s02 NotReady <none> 118s v1.19.3
    k8s03 NotReady <none> 2m v1.19.3

    # Edit the kube-proxy configmap so kube-proxy uses ipvs
    kubectl edit configmap kube-proxy -n kube-system
    Change mode: "" to mode: "ipvs"

    ## Generic k8s resource-inspection command
    kubectl get [pod|namespace|controller|node|configmap...]

    # Restart kube-proxy
    ## List all kube-proxy pods
    kubectl -n kube-system get pod
    kube-proxy-2d479 1/1 Running 0 14m
    kube-proxy-2ng8t 1/1 Running 0 46m
    kube-proxy-t2ztv 1/1 Running 0 14m

    ## Delete the kube-proxy pods; the DaemonSet recreates them with the new mode
    kubectl -n kube-system delete pod kube-proxy-2d479
    kubectl -n kube-system delete pod kube-proxy-2ng8t
    kubectl -n kube-system delete pod kube-proxy-t2ztv
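
    ## Optional: once the new pods are up, verify that ipvs rules are being programmed
    ## (ipvsadm was installed earlier; the exact kube-proxy log wording may vary)
    ipvsadm -Ln
    kubectl -n kube-system logs <one-of-the-new-kube-proxy-pods> | grep -i ipvs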
  2. Deploy the flannel network

    # Deploy the flannel network
    ## Download the flannel manifest
    wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

    ## Or fetch the flannel source with git (coreos/flannel redirects to flannel-io/flannel)
    yum install -y git
    git clone --depth 1 https://github.com/coreos/flannel.git
    git clone https://github.com/flannel-io/flannel.git

    # Edit the flannel manifest
    vim kube-flannel.yml
    ## Change the Pod network CIDR
    net-conf.json: |
      {
        "Network": "10.244.0.0/16",
        "Backend": {
          "Type": "vxlan"
        }
      }
    ## change to:
    net-conf.json: |
      {
        "Network": "10.2.0.0/16",
        "Backend": {
          "Type": "vxlan"
        }
      }

    ## Specify the host NIC name
    containers:
    - name: kube-flannel
      image: docker.io/flannel/flannel:v0.22.0
      #image: docker.io/rancher/mirrored-flannelcni-flannel:v0.22.0
      command:
      - /opt/bin/flanneld
      args:
      - --ip-masq
      - --kube-subnet-mgr

    ## change to:
    containers:
    - name: kube-flannel
      image: docker.io/flannel/flannel:v0.22.0
      #image: docker.io/rancher/mirrored-flannelcni-flannel:v0.22.0
      command:
      - /opt/bin/flanneld
      args:
      - --ip-masq
      - --kube-subnet-mgr
      - --iface=eth0

    # Apply the manifest
    kubectl apply -f kube-flannel.yml
    namespace/kube-flannel created
    clusterrole.rbac.authorization.k8s.io/flannel created
    clusterrolebinding.rbac.authorization.k8s.io/flannel created
    serviceaccount/flannel created
    configmap/kube-flannel-cfg created
    daemonset.apps/kube-flannel-ds created
  3. Check the cluster status

    # Check the flannel pods
    kubectl get pod -n kube-flannel
    NAME READY STATUS RESTARTS AGE
    kube-flannel-ds-gtjl4 1/1 Running 0 104s
    kube-flannel-ds-smrb9 1/1 Running 0 104s
    kube-flannel-ds-w5khx 1/1 Running 0 104s

    # Inspect how a flannel pod was created
    kubectl describe pod kube-flannel-ds-gtjl4 -n kube-flannel

    # Check the nodes (when they are all Ready, the k8s cluster is good to go)
    kubectl get node
    NAME STATUS ROLES AGE VERSION
    k8s01 Ready master 69m v1.19.3
    k8s02 Ready <none> 37m v1.19.3
    k8s03 Ready <none> 37m v1.19.3
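
    # Optional: also confirm that coredns and the other kube-system pods are Running
    kubectl -n kube-system get pod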

Labeling the nodes

# Give the worker nodes role labels
kubectl label nodes k8s02 node-role.kubernetes.io/node01=
kubectl label nodes k8s03 node-role.kubernetes.io/node02=

# Check the nodes
kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s01 Ready master 75m v1.19.3
k8s02 Ready node01 42m v1.19.3
k8s03 Ready node02 42m v1.19.3
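
# To remove a label, append a dash to the label key, e.g.:
kubectl label nodes k8s02 node-role.kubernetes.io/node01-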

Bonus trick: kubectl command completion

# Install bash-completion
yum install -y bash-completion

# Load completion in the current shell (the last line below makes it persistent for new shells)
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)

kubectl completion bash > /etc/bash_completion.d/kubectl

K8S Command Syntax

Common K8S options

| Option | Purpose |
| --- | --- |
| -n | Specify the namespace |
| --image | Specify the image |
| -o | Specify the output format (common: json, wide, yaml) |
| -f | Specify a resource manifest file |
| --show-labels | Show labels |
| -c | Specify the container name when connecting to a pod |
| -w | Watch for changes in real time |

Command syntax

  • View resources

    # Syntax
    kubectl get [resource type]

    # Examples
    kubectl get node
    kubectl get pod
    kubectl get namespace
    kubectl get ns
    kubectl get [any k8s resource type]
    ## "No resources found in default namespace." means no pods exist in the default namespace
    ## kubectl operates on the "default" namespace unless another one is specified
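
    ## Combined with the common options from the table above, for example:
    kubectl get pod -n kube-system -o wide
    kubectl get nodes --show-labels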
  • View detailed information about a resource

    # Syntax
    kubectl describe [resource type]

    # Examples
    kubectl describe pod
    kubectl describe namespace
    kubectl describe [any k8s resource type]
  • Label a resource

    # Syntax
    kubectl label [resource type]

    # Examples
    kubectl label node
    kubectl label pod
    kubectl label [any k8s resource type]
    ## Giving a node a role label uses the k8s node-role.kubernetes.io/* API label (see the node-labeling section above)
  • Create a resource

    # Syntax
    kubectl create [resource type]

    # Examples
    ## Create a namespace
    kubectl create namespace hcl
    namespace/hcl created

    ## List the "namespace" resources
    kubectl get ns
    NAME STATUS AGE
    default Active 3d8h
    hcl Active 1s
    kube-flannel Active 3d7h
    kube-node-lease Active 3d8h
    kube-public Active 3d8h
    kube-system Active 3d8h
    test-mysql Active 2d8h
  • Delete a resource

    # Syntax
    kubectl delete [resource type] [name|label]

    # Examples
    ## Delete a pod
    kubectl delete pod nginx-565785f75c-c5rlr
    ## Delete a namespace
    kubectl delete namespace hcl