1. Pre-installation environment setup (run on every machine)
1.1 Set a unique hostname on each machine
hostnamectl set-hostname master-0x   # or worker-0x for worker nodes
1.2 Disable SELinux (set to permissive mode)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
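After the change, getenforce should print Permissive:
getenforce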
1.3 Disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
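free -h should now show 0B on the Swap line:
free -h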
1.4 Disable the firewall
systemctl stop firewalld.service
systemctl disable firewalld.service
1.5 Install the dependencies required by KubeKey
yum install -y sudo curl openssl conntrack socat
2. Install Kubernetes with KubeKey
Run this on any single machine, usually a master node.
2.1 Download KubeKey
export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v2.0.0 sh -
chmod +x kk
When installing a newer Kubernetes release, the installation may fail: the KubeKey v2.0.0 used above may not support recent Kubernetes versions. Check the GitHub releases page and download the latest version: https://github.com/kubesphere/kubekey/releases
wget https://github.com/kubesphere/kubekey/releases/download/v2.2.1/kubekey-v2.2.1-linux-amd64.tar.gz
tar -xzf kubekey-v2.2.1-linux-amd64.tar.gz
chmod +x kk
2.2 Create the configuration file
The trailing argument is the Kubernetes version and can be changed; see the supported versions at:
https://kubesphere.io/zh/docs/installing-on-linux/introduction/multioverview/
./kk create config --with-kubernetes v1.21.5
2.3 Edit the generated config-sample.yaml
Only the hosts and roleGroups sections need changes.
Check whether the machines' IP subnet overlaps with the CIDRs under network; if it does, adjust them.
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: node1, address: 172.16.0.2, internalAddress: 172.16.0.2, user: ubuntu, password: "Qcloud@123"}
  - {name: node2, address: 172.16.0.3, internalAddress: 172.16.0.3, user: ubuntu, password: "Qcloud@123"}
  roleGroups:
    etcd:
    - node1
    control-plane:
    - node1
    worker:
    - node1
    - node2
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    # internalLoadbalancer: haproxy
    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.21.5
    clusterName: cluster.local
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    plainHTTP: false
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons: []
2.4 Create the Kubernetes cluster from the configuration file
./kk create cluster -f config-sample.yaml
2.5 Check that the cluster is healthy
kubectl get pod -A
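It is also worth confirming that every node reports Ready:
kubectl get nodes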
2.6 Add a node: add a new entry under hosts in the configuration file, put the new node into roleGroups, then run:
./kk add nodes -f config-sample.yaml
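The reverse also exists; in KubeKey 2.x a node can be removed with (replace <nodeName> with the node's name from the config file):
./kk delete node <nodeName> -f config-sample.yaml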
3. Shared storage (NFS)
3.1 Install dependencies (run on all machines)
yum install -y nfs-utils
3.2 Run on the node that serves the storage (usually the master)
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
mkdir -p /nfs/data
systemctl enable rpcbind --now
systemctl enable nfs-server --now
exportfs -r
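A quick sanity check of the export, first locally and then from any other node (replace 10.10.10.161 with the NFS server's IP):
showmount -e localhost
showmount -e 10.10.10.161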
3.3 Mount the NFS storage in a pod
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx
        name: nginx
        ports:
        - containerPort: 80
          hostPort: 80
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
      volumes:
      - name: html
        nfs:
          server: 10.10.10.161
          path: /nfs/test/data
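Note that nfs.path must be (a subdirectory of) a path exported in /etc/exports (only /nfs/data is exported above) and must already exist on the server, or the pod will hang in ContainerCreating. A minimal apply-and-check, assuming the manifest is saved as nginx-nfs.yaml (hypothetical filename):
kubectl apply -f nginx-nfs.yaml
kubectl get pods -l app=nginx -o wide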
Miscellaneous
Exposing an in-cluster service externally, the simple way
Edit /etc/kubernetes/manifests/kube-apiserver.yaml on the master node and append one flag to the kube-apiserver command, widening the NodePort range to 1-65535:
spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=10.10.10.161
    - --allow-privileged=true
    - ....
    - --service-node-port-range=1-65535
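kube-apiserver runs as a static pod, so kubelet recreates it automatically once the manifest is saved; one way to confirm it came back up:
kubectl -n kube-system get pods | grep kube-apiserver
With the widened range in place, a Service can claim a low NodePort such as 6379 directly, as the following Redis example does.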
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - image: redis:6.2
        name: redis
        command: ["redis-server","/etc/redis/redis.conf"]
        env:
        - name: TZ
          value: Asia/Shanghai
        volumeMounts:
        - name: data
          mountPath: /data
        - name: config
          mountPath: /etc/redis/redis.conf
          subPath: redis.conf
      volumes:
      - name: data
        nfs:
          server: master-01
          path: /nfs/redis/data
      - name: config
        nfs:
          server: master-01
          path: /nfs/redis/config
---
apiVersion: v1
kind: Service
metadata:
  name: redis
spec:
  selector:
    app: redis
  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6379
    nodePort: 6379
  type: NodePort
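A quick end-to-end check, assuming redis-cli is installed on a machine that can reach a node (10.10.10.161 stands in for any node IP):
redis-cli -h 10.10.10.161 -p 6379 ping   # expected reply: PONG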
Installing the dashboard
First find the dashboard release matching your Kubernetes version: https://github.com/kubernetes/dashboard/releases
Open the downloaded YAML file and append the following, which creates an account and grants it permissions:
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
To set the login session lifetime, find the following Deployment and add a --token-ttl=2592000 line:
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: kubernetesui/dashboard:v2.4.0
        imagePullPolicy: Always
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
        - --auto-generate-certificates
        - --namespace=kubernetes-dashboard
        # Added line: set the login session lifetime to 30 days
        - --token-ttl=2592000
Retrieve the login token
kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"
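On Kubernetes v1.24 and later, token Secrets are no longer created automatically for ServiceAccounts, so the command above returns nothing; request a token directly instead:
kubectl -n kubernetes-dashboard create token admin-user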
Access the UI
1. Change the Service directly from type: ClusterIP to type: NodePort:
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
2. Or put an nginx reverse proxy inside the cluster in front of it, for example:
server {
    listen 80;
    listen 443 ssl http2;
    server_name k8s.nikm.cn;
    if ($server_port !~ 443){
        rewrite ^(/.*)$ https://$host$1 permanent;
    }
    ssl_certificate /ssl/pem.txt;
    ssl_certificate_key /ssl/key.txt;
    ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3;
    ssl_ciphers EECDH+CHACHA20:EECDH+CHACHA20-draft:EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5;
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    add_header Strict-Transport-Security "max-age=31536000";
    error_page 497 https://$host$request_uri;
    location / {
        proxy_pass "https://kubernetes-dashboard.kubernetes-dashboard:443";
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Cookie $http_cookie;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
    access_log /var/log/nginx/k8s.nikm.cn.log;
    error_log /var/log/nginx/k8s.nikm.cn.error.log;
}
Installing metrics-server
This enables the kubectl top node and kubectl top pod -A commands.
First find the metrics-server release matching your Kubernetes version: https://github.com/kubernetes-sigs/metrics-server
Here the latest release is fine: https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.1/components.yaml
After downloading, add the line - --kubelet-insecure-tls to skip verification of the kubelets' TLS certificates:
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        # Added line: skip kubelet TLS certificate verification
        - --kubelet-insecure-tls
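Apply the modified manifest; once the pod is running (metrics can take a minute to appear), the top commands should return data:
kubectl apply -f components.yaml
kubectl top node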
Fixing the CIDRNotAvailable error
When checking node status you may see an event such as Node worker-02 status is now: CIDRNotAvailable, even though the node runs fine and no subnets overlap. This is harmless when the CNI plugin (Calico here) manages pod IP assignment itself, so the controller manager does not need to allocate per-node CIDRs.
Fix: edit /etc/kubernetes/manifests/kube-controller-manager.yaml and change allocate-node-cidrs to false:
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-controller-manager
    tier: control-plane
  name: kube-controller-manager
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-controller-manager
    #- --allocate-node-cidrs=true
    # Changed to false
    - --allocate-node-cidrs=false
    - --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --bind-address=0.0.0.0
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --cluster-cidr=10.10.12.0/24
    - --cluster-name=cluster.local
    - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
    - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
    - --controllers=*,bootstrapsigner,tokencleaner
    - --experimental-cluster-signing-duration=87600h
    - --feature-gates=RotateKubeletServerCertificate=true,TTLAfterFinished=true,ExpandCSIVolumes=true,CSIStorageCapacity=true
    - --kubeconfig=/etc/kubernetes/controller-manager.conf
    - --leader-elect=true
    - --node-cidr-mask-size=24
    - --port=0
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --root-ca-file=/etc/kubernetes/pki/ca.crt
    - --service-account-private-key-file=/etc/kubernetes/pki/sa.key
    - --service-cluster-ip-range=10.10.11.0/24
    - --use-service-account-credentials=true
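kubelet restarts the controller manager automatically after the manifest is saved; verify it is running again via the component label from the manifest above:
kubectl -n kube-system get pod -l component=kube-controller-manager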
kubectl auto-completion
yum install -y bash-completion
echo 'source <(kubectl completion bash)' >>~/.bashrc
kubectl completion bash >/etc/bash_completion.d/kubectl
Takes effect after reconnecting the SSH session.
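Optionally, shorten kubectl to k while keeping completion, as suggested by the upstream kubectl documentation:
echo 'alias k=kubectl' >>~/.bashrc
echo 'complete -o default -F __start_kubectl k' >>~/.bashrc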
Silencing the /var/spool/mail/root notification
echo "unset MAILCHECK">> /etc/profile
source /etc/profile