1. 安装前环境配置 全部机器都执行

1.1 各个机器设置自己的hostname

hostnamectl set-hostname master-0x(worker-0x)

1.2 将SELinux禁用

setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

1.3 关闭swap

swapoff -a  
sed -ri 's/.*swap.*/#&/' /etc/fstab

1.4 关闭防火墙

systemctl stop firewalld.service
systemctl disable firewalld.service

1.5 安装kubekey所需依赖

yum install -y sudo curl openssl conntrack socat

2. 使用kubekey安装k8s 选择任意一台机器执行即可 通常选master机器

2.2 下载kubekey

export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v2.0.0 sh -
chmod +x kk

2.2 创建配置文件

--with-kubernetes 后面是k8s版本号 可更改 支持的具体版本查看以下链接

https://kubesphere.io/zh/docs/installing-on-linux/introduction/multioverview/

./kk create config  --with-kubernetes v1.21.5

2.3 编辑生成的config-sample.yaml配置文件

修改hosts和roleGroups配置即可

请注意机器的ip网段 是否与network里的配置重叠 若重叠则自行修改

# kubekey cluster definition (config-sample.yaml) — edit hosts and roleGroups
# to match your machines before running "kk create cluster"
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  # SSH connection info for every node: address is the IP kk connects to,
  # internalAddress the cluster-internal IP (same here)
  hosts:
  - {name: node1, address: 172.16.0.2, internalAddress: 172.16.0.2, user: ubuntu, password: "Qcloud@123"}
  - {name: node2, address: 172.16.0.3, internalAddress: 172.16.0.3, user: ubuntu, password: "Qcloud@123"}
  # assign cluster roles using the host names declared above
  roleGroups:
    etcd:
    - node1
    control-plane: 
    - node1
    worker:
    - node1
    - node2
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    # internalLoadbalancer: haproxy

    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.21.5
    clusterName: cluster.local
  network:
    plugin: calico
    # NOTE(review): make sure these CIDRs do not overlap the node network
    # (see the warning above); change them here if they do
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    plainHTTP: false
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons: []

2.4 使用配置文件开始创建k8s集群

./kk create cluster -f config-sample.yaml

2.5 检查集群是否正常

kubectl get pod -A

2.6 新增节点 编辑配置文件新增一个hosts 并将新增的节点放入roleGroups

./kk add nodes -f config-sample.yaml

3. 共享存储

3.1 安装依赖 所有机器执行

yum install -y nfs-utils

3.2 作为存储的节点执行 通常为master

# export /nfs/data read-write to all clients; no_root_squash keeps root
# permissions for clients (needed so pods can write as root)
# NOTE(review): ">" overwrites /etc/exports — use ">>" if it already has entries
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
mkdir -p /nfs/data
systemctl enable rpcbind --now
systemctl enable nfs-server --now
# re-read /etc/exports so the new export takes effect
exportfs -r

3.3 pod挂载nfs存储

# nginx Deployment whose html directory is served from the NFS share
# exported in step 3.2
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx
        name: nginx
        ports:
          - containerPort: 80
            # binds port 80 on every node that runs a replica
            hostPort: 80
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
      volumes:
        - name: html
          nfs:
            # IP of the NFS server node set up in step 3.2
            server: 10.10.10.161
            # fixed: was /nfs/test/data, which is outside the exported tree —
            # step 3.2 only exports /nfs/data, so the mount must point there
            path: /nfs/data

杂项

简单暴露集群内服务到外面

修改master节点上的/etc/kubernetes/manifests/kube-apiserver.yaml

#在kube-apiserver最后面增加一个启动配置 设置node-port范围为1-65535
spec:
  containers:
    - command:
        - kube-apiserver
        - --advertise-address=10.10.10.161
        - --allow-privileged=true
        - ....
        - --service-node-port-range=1-65535
# redis Deployment: data directory and redis.conf both come from NFS shares
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
        - image: redis:6.2
          name: redis
          # start redis with the mounted config file instead of the image default
          command: ["redis-server","/etc/redis/redis.conf"]
          env:
            - name: TZ
              value: Asia/Shanghai
          volumeMounts:
            - name: data
              mountPath: /data
            - name: config
              # subPath mounts only redis.conf from the share, not the directory
              mountPath: /etc/redis/redis.conf
              subPath: redis.conf
      volumes:
        - name: data
          nfs:
            # NOTE(review): hostname must be resolvable by the kubelet on every
            # node — TODO confirm; using the server's IP is safer
            server: master-01
            path: /nfs/redis/data
        - name: config
          nfs:
            server: master-01
            path: /nfs/redis/config
---
# expose redis outside the cluster on every node's port 6379
apiVersion: v1
kind: Service
metadata:
  name: redis
spec:
  selector:
    app: redis
  ports:
    - port: 6379
      protocol: TCP
      targetPort: 6379
      # 6379 is outside the default NodePort range (30000-32767); this only
      # works with the --service-node-port-range=1-65535 apiserver flag above
      nodePort: 6379
  type: NodePort

安装dashboard

先根据k8s版本找到对应的dashboard版本: https://github.com/kubernetes/dashboard/releases

将下载好的yaml文件打开 后面加上以下配置 用于创建账号和授权

---
# service account used to log in to the dashboard
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
# grant the account full cluster-admin rights
# NOTE(review): fine for a private cluster; too broad for production use
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard

设置登录有效期 找到如下配置 新增一行--token-ttl=2592000

# dashboard Deployment from the upstream manifest, with one extra arg appended
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.4.0
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # added line: keep the login token valid for 30 days (seconds)
            - --token-ttl=2592000

获取登录token

kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"

访问ui面板

1. 直接修改 type: ClusterIP 改为 type: NodePort

kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard

2. 拿集群内的nginx做个反向代理之类的

# reverse proxy in front of the dashboard service: terminates TLS and
# forces all plain-HTTP traffic over to HTTPS
server {
    listen 80;
    listen 443 ssl http2;
    server_name k8s.nikm.cn;

    # redirect requests that arrived on the plain-HTTP port to HTTPS
    if ($server_port !~ 443){
        rewrite ^(/.*)$ https://$host$1 permanent;
    }

    ssl_certificate    /ssl/pem.txt;
    ssl_certificate_key    /ssl/key.txt;
    # fixed: dropped TLSv1.1 — it is deprecated and rejected by modern clients
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers EECDH+CHACHA20:EECDH+CHACHA20-draft:EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5;
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    add_header Strict-Transport-Security "max-age=31536000";
    # 497 = plain HTTP sent to the HTTPS port; bounce the client to HTTPS
    error_page 497  https://$host$request_uri;

    location / {
        # the dashboard serves HTTPS itself, so proxy with the https scheme
        proxy_pass "https://kubernetes-dashboard.kubernetes-dashboard:443";
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Cookie $http_cookie;
        # allow websocket upgrade (used by the dashboard exec/log streaming)
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    access_log  /var/log/nginx/k8s.nikm.cn.log;
    error_log  /var/log/nginx/k8s.nikm.cn.error.log;
}

kubectl 自动补全

yum install -y bash-completion
echo 'source <(kubectl completion bash)' >>~/.bashrc
kubectl completion bash >/etc/bash_completion.d/kubectl

重新连接后ssh会话生效

关闭/var/spool/mail/root提示

echo "unset MAILCHECK">> /etc/profile
source /etc/profile
最后修改:2022 年 06 月 20 日 02 : 57 PM
如果觉得我的文章对你有用,请随意赞赏