9. Deploying Cluster Add-ons
1. Deploy the CoreDNS add-on
1.1: Download and configure CoreDNS
[root@k8s-master1 ~]# cd /opt/k8s/work
[root@k8s-master1 work]# git clone https://github.com/coredns/deployment.git
[root@k8s-master1 work]# mv deployment coredns-deployment
1.2: Create CoreDNS
[root@k8s-master1 work]# cd /opt/k8s/work/coredns-deployment/kubernetes
[root@k8s-master1 kubernetes]# ./deploy.sh -i ${CLUSTER_DNS_SVC_IP} -d ${CLUSTER_DNS_DOMAIN} | kubectl apply -f -
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
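The ${CLUSTER_DNS_SVC_IP} and ${CLUSTER_DNS_DOMAIN} variables are assumed to be exported from the environment file prepared earlier in this series. If they are missing from the current shell, a minimal sketch with the values that appear later in this section (adjust them to your own environment):
# Hypothetical values; they must match the kubelet's --cluster-dns and --cluster-domain settings
export CLUSTER_DNS_SVC_IP="10.254.0.2"
export CLUSTER_DNS_DOMAIN="cluster.local"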
1.3: Verify CoreDNS functionality
[root@k8s-master1 kubernetes]# kubectl get all -n kube-system -l k8s-app=kube-dns

- Create a new Deployment:
[root@k8s-master1 ~]# cd /opt/k8s/work
[root@k8s-master1 work]# cat > test-nginx.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      run: test-nginx
  template:
    metadata:
      labels:
        run: test-nginx
    spec:
      containers:
      - name: test-nginx
        image: nginx:1.16.1
        ports:
        - containerPort: 80
EOF
[root@k8s-master1 work]# kubectl create -f test-nginx.yaml
deployment.apps/test-nginx created
- Expose the Deployment to create a test-nginx Service:
[root@k8s-master1 work]# kubectl expose deploy test-nginx
service/test-nginx exposed
[root@k8s-master1 work]# kubectl get services test-nginx -o wide
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE   SELECTOR
test-nginx   ClusterIP   10.254.240.74   <none>        80/TCP    14s   run=test-nginx
- Create another Pod and check whether its /etc/resolv.conf contains the --cluster-dns and --cluster-domain values configured on the kubelet, and whether the test-nginx Service resolves to the Cluster IP shown above (10.254.240.74):
[root@k8s-master1 work]# cat > dnsutils-ds.yml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: dnsutils-ds
  labels:
    app: dnsutils-ds
spec:
  type: NodePort
  selector:
    app: dnsutils-ds
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: dnsutils-ds
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      app: dnsutils-ds
  template:
    metadata:
      labels:
        app: dnsutils-ds
    spec:
      containers:
      - name: my-dnsutils
        image: tutum/dnsutils:latest
        command:
          - sleep
          - "3600"
        ports:
        - containerPort: 80
EOF
[root@k8s-master1 work]# kubectl create -f dnsutils-ds.yml
service/dnsutils-ds created
daemonset.apps/dnsutils-ds created
[root@k8s-master1 work]# kubectl get pods -lapp=dnsutils-ds -o wide
NAME                READY   STATUS    RESTARTS   AGE     IP              NODE        NOMINATED NODE   READINESS GATES
dnsutils-ds-h2955   1/1     Running   0          7m22s   10.68.169.131   k8s-node2   <none>           <none>
dnsutils-ds-n8vls   1/1     Running   0          7m22s   10.68.36.69     k8s-node1   <none>           <none>
dnsutils-ds-xm65p   1/1     Running   0          7m22s   10.68.107.194   k8s-node3   <none>           <none>
[root@k8s-master1 work]# kubectl -it exec dnsutils-ds-79nf4 cat /etc/resolv.conf
nameserver 10.254.0.2
search default.svc.cluster.local svc.cluster.local cluster.local
options ndots:5
[root@k8s-master1 work]# kubectl -it exec dnsutils-ds-79nf4 nslookup kubernetes
Server: 10.254.0.2
Address: 10.254.0.2#53
Name: kubernetes.default.svc.cluster.local
Address: 10.254.0.1
[root@k8s-master1 work]# kubectl -it exec dnsutils-ds-79nf4 nslookup www.baidu.com
Server: 10.254.0.2
Address: 10.254.0.2#53
Non-authoritative answer:
www.baidu.com canonical name = www.a.shifen.com.
Name: www.a.shifen.com
Address: 180.101.49.12
Name: www.a.shifen.com
Address: 180.101.49.11
[root@k8s-master1 work]# kubectl -it exec dnsutils-ds-79nf4 nslookup www.baidu.com.
Server: 10.254.0.2
Address: 10.254.0.2#53
www.baidu.com canonical name = www.a.shifen.com.
Name: www.a.shifen.com
Address: 180.101.49.12
Name: www.a.shifen.com
Address: 180.101.49.11
[root@k8s-master1 work]# kubectl -it exec dnsutils-ds-79nf4 nslookup test-nginx
Server: 10.254.0.2
Address: 10.254.0.2#53
Name: test-nginx.default.svc.cluster.local
Address: 10.254.240.74
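As an extra check that is not part of the original steps, the Service can also be reached over HTTP through its Cluster IP from any node running kube-proxy; a sketch using the Cluster IP shown above:
# Assumes kube-proxy is running on this node; 10.254.240.74 is the Cluster IP of test-nginx
[root@k8s-node1 ~]# curl -sI http://10.254.240.74 | head -n 1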
2. Deploy the Dashboard add-on
2.1: Download and modify the configuration file
[root@k8s-master1 ~]# cd /opt/k8s/work/
[root@k8s-master1 work]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.3/aio/deploy/recommended.yaml
[root@k8s-master1 work]# mv recommended.yaml dashboard-recommended.yaml
2.2: Apply all definition files
[root@k8s-master1 work]# kubectl apply -f dashboard-recommended.yaml

2.3: Check the running status
[root@k8s-master1 work]# kubectl get pods -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-6b4884c9d5-wsz5l   1/1     Running   0          16m
kubernetes-dashboard-7f99b75bf4-kbp7z        1/1     Running   0          16m
2.4: Access the Dashboard
Starting with version 1.7, the Dashboard can only be accessed over HTTPS. When using kube proxy, it must listen on localhost or 127.0.0.1; NodePort access has no such restriction, but it is only recommended for development environments. If these conditions are not met, the browser will not redirect after a successful login and will stay on the login page.
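If NodePort access is acceptable for a development environment, the Dashboard Service type can be switched instead of using a proxy; a sketch, not part of the original steps:
# Patch the Service to NodePort, then check the assigned node port
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'
kubectl -n kubernetes-dashboard get svc kubernetes-dashboard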
2.4.1: Access the Dashboard via port forwarding
- Start port forwarding:
[root@k8s-master1 work]# kubectl port-forward -n kubernetes-dashboard svc/kubernetes-dashboard 4443:443 --address 0.0.0.0
- Open the URL in a browser: https://192.168.66.62:4443

2.4.2: Create a login token
[root@k8s-master1 ~]# kubectl create sa dashboard-admin -n kube-system
[root@k8s-master1 ~]# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
[root@k8s-master1 ~]# ADMIN_SECRET=$(kubectl get secrets -n kube-system | grep dashboard-admin | awk '{print $1}')
[root@k8s-master1 ~]# DASHBOARD_LOGIN_TOKEN=$(kubectl describe secret -n kube-system ${ADMIN_SECRET} | grep -E '^token' | awk '{print $2}')
[root@k8s-master1 ~]# echo ${DASHBOARD_LOGIN_TOKEN}
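Note: this flow relies on the token Secret that older Kubernetes releases create automatically for a ServiceAccount. On clusters running Kubernetes 1.24 or later such Secrets are no longer auto-created; a short-lived token can be requested directly instead:
# Only needed on Kubernetes 1.24+; prints a token for the dashboard-admin ServiceAccount
kubectl -n kube-system create token dashboard-admin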


2.4.3: Create a kubeconfig file that uses the token
# Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/cert/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=dashboard.kubeconfig
# Set client authentication parameters, using the token created above
kubectl config set-credentials dashboard_user \
--token=${DASHBOARD_LOGIN_TOKEN} \
--kubeconfig=dashboard.kubeconfig
# Set context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=dashboard_user \
--kubeconfig=dashboard.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=dashboard.kubeconfig
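The ${KUBE_APISERVER} variable is assumed to still be exported from the earlier apiserver setup. If it is not, set it to your own apiserver address first; the value below is only a hypothetical example:
# Hypothetical example; use your own apiserver VIP or master address and port
export KUBE_APISERVER="https://192.168.66.62:6443"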
- Log in to the Dashboard with the generated dashboard.kubeconfig.


3. Deploy the kube-prometheus add-on
- Project: kube-prometheus
- Check the project README and clone the branch or tag that matches your Kubernetes version (see the compatibility matrix in the README).

3.1: Download and install
[root@k8s-master1 ~]# cd /opt/k8s/work
[root@k8s-master1 work]# git clone https://github.com/coreos/kube-prometheus.git
[root@k8s-master1 work]# cd kube-prometheus/
[root@k8s-master1 kube-prometheus]# find . -name "*.yaml" -exec grep 'image: ' {} \;|awk '{print $NF}'|sort|uniq
[root@k8s-master1 kube-prometheus]# find . -name "*.yaml" -exec grep 'quay.io' {} \;|awk '{print $NF}'|sort|uniq
# Use the USTC mirror registry
[root@k8s-master1 kube-prometheus]# sed -i -e 's#quay.io#quay.mirrors.ustc.edu.cn#g' manifests/*.yaml manifests/setup/*.yaml
# Install the prometheus-operator
[root@k8s-master1 kube-prometheus]# kubectl apply -f manifests/setup
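The setup manifests register the Prometheus Operator CRDs. Before applying the remaining manifests, it is worth waiting until those CRDs are established; a simple wait loop, similar to the one suggested in the kube-prometheus README:
# Wait until the ServiceMonitor CRD is registered before applying manifests/
until kubectl get servicemonitors --all-namespaces >/dev/null 2>&1; do echo "waiting for CRDs..."; sleep 2; done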
# Install Prometheus, Alertmanager, Grafana, the metrics adapter, and the other components
[root@k8s-master1 kube-prometheus]# kubectl apply -f manifests/
3.2: Check the running status
[root@k8s-master1 kube-prometheus]# kubectl get pods -n monitoring

3.3: Access the Prometheus UI
- Start the service proxy:
[root@k8s-master1 kube-prometheus]# kubectl port-forward --address 0.0.0.0 pod/prometheus-k8s-0 -n monitoring 9090:9090
Forwarding from 0.0.0.0:9090 -> 9090
- Open in a browser: http://ip:9090
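While the port-forward is running, the Prometheus HTTP API can also be spot-checked from the same host; the query below simply returns the up metric for all scrape targets:
# Query the Prometheus API through the forwarded port
curl -s 'http://localhost:9090/api/v1/query?query=up'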


3.4: Access the Grafana UI
- Start the service proxy:
[root@k8s-master1 kube-prometheus]# kubectl port-forward --address 0.0.0.0 svc/grafana -n monitoring 3000:3000
- Open in a browser: http://ip:3000 (the default Grafana credentials in kube-prometheus are admin/admin).
