The coredns.yaml examples you find online are all copy-pasted from one another with no explanation of what the settings mean. Rather than hand you a fish, let's teach you to fish: the official CoreDNS manifest template can be downloaded from https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/coredns/coredns.yaml.base
1. Download the CoreDNS YAML
If wget cannot fetch it, copy the file by hand.
root@master01:~# mkdir -p /data/work/yaml/coredns/
root@master01:~# cd /data/work/yaml/coredns/
root@master01:/data/work/yaml/coredns# wget https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/coredns/coredns.yaml.base
root@master01:/data/work/yaml/coredns# cp coredns.yaml.base coredns.yaml
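Note that the link above is the GitHub blob page, so wget saves an HTML page instead of the manifest itself. A workable alternative, assuming the file still lives at the same path on the master branch, is to pull the raw version:
root@master01:/data/work/yaml/coredns# wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dns/coredns/coredns.yaml.base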
2. Modify coredns.yaml
A few settings in the downloaded file need to be changed.
#Replace the cluster domain placeholder __DNS__DOMAIN__, usually cluster.local
#77 kubernetes __DNS__DOMAIN__ in-addr.arpa ip6.arpa {
kubernetes cluster.local in-addr.arpa ip6.arpa {
#Change the image from the Google registry to the Docker Hub one, which is easier to pull
#142 image: k8s.gcr.io/coredns/coredns:v1.8.6
image: coredns/coredns:1.8.6
#Set the Pod memory limit; 300Mi is enough
#146 memory: __DNS__MEMORY__LIMIT__
memory: 300Mi
#Set the CoreDNS Service clusterIP, usually the second address of the Service CIDR, e.g. 10.100.0.2; the first address is taken by the apiserver's kubernetes Service
#212 clusterIP: __DNS__SERVER__
clusterIP: 10.100.0.2
#Set the number of CoreDNS replicas; the default is 1 and the base file has no replicas field, so add it yourself
replicas: 3
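The placeholder substitutions can also be scripted instead of edited by hand. A minimal sed sketch using the example values above (the image line in your copy of the base file may name a different version; the replicas field has no placeholder, so it still has to be added manually under the Deployment spec):
root@master01:/data/work/yaml/coredns# sed -i \
  -e 's/__DNS__DOMAIN__/cluster.local/g' \
  -e 's/__DNS__MEMORY__LIMIT__/300Mi/g' \
  -e 's/__DNS__SERVER__/10.100.0.2/g' \
  -e 's#k8s.gcr.io/coredns/coredns:v1.8.6#coredns/coredns:1.8.6#g' \
  coredns.yaml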
After the changes, the complete coredns.yaml looks like this:
cat coredns.yaml
# __MACHINE_GENERATED_WARNING__
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
replicas: 3
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
priorityClassName: system-cluster-critical
serviceAccountName: coredns
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
kubernetes.io/os: linux
containers:
- name: coredns
        image: coredns/coredns:1.8.6
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 500Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.100.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
3. Create CoreDNS
root@master01:/data/work/yaml/coredns# kubectl apply -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
CoreDNS started successfully.
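To confirm the Pods are actually running and that the Service got the expected clusterIP, a quick check with standard kubectl commands (using the labels from the manifest above) might look like this:
root@master01:/data/work/yaml/coredns# kubectl get pods -n kube-system -l k8s-app=kube-dns -o wide
root@master01:/data/work/yaml/coredns# kubectl get svc kube-dns -n kube-system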
4. Test CoreDNS name resolution
Deploy nginx and tomcat containers plus their Services for the test.
cat nginx.yaml
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
labels:
app: nginx-deployment-label
name: nginx-deployment
namespace: test
spec:
# replicas: 1
selector:
matchLabels:
app: nginx-selector
template:
metadata:
labels:
app: nginx-selector
spec:
containers:
- name: linux66-nginx-container
image: nginx
#command: ["/apps/tomcat/bin/run_tomcat.sh"]
#imagePullPolicy: IfNotPresent
imagePullPolicy: Always
ports:
- containerPort: 80
protocol: TCP
name: http
- containerPort: 443
protocol: TCP
name: https
env:
- name: "password"
value: "123456"
- name: "age"
value: "18"
# resources:
# limits:
# cpu: 2
# memory: 2Gi
# requests:
# cpu: 500m
# memory: 1Gi
---
kind: Service
apiVersion: v1
metadata:
labels:
app: nginx-service-label
name: nginx-service
namespace: test
spec:
type: NodePort
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
nodePort: 30006
- name: https
port: 443
protocol: TCP
targetPort: 443
nodePort: 30443
selector:
app: nginx-selector
cat tomcat.yaml
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
labels:
app: tomcat-app1-deployment-label
name: tomcat-app1-deployment
spec:
replicas: 1
selector:
matchLabels:
app: tomcat-app1-selector
template:
metadata:
labels:
app: tomcat-app1-selector
spec:
containers:
- name: tomcat-app1-container
image: tomcat:7.0.94-alpine
#command: ["/apps/tomcat/bin/run_tomcat.sh"]
#imagePullPolicy: IfNotPresent
imagePullPolicy: Always
ports:
- containerPort: 8080
protocol: TCP
name: http
env:
- name: "password"
value: "123456"
- name: "age"
value: "18"
resources:
limits:
cpu: 1
memory: "512Mi"
requests:
cpu: 500m
memory: "512Mi"
---
kind: Service
apiVersion: v1
metadata:
labels:
app: tomcat-app1-service-label
name: tomcat-app1-service
spec:
#type: NodePort
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
#nodePort: 40003
selector:
app: tomcat-app1-selector
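Before the test, both manifests have to be applied; the nginx manifest targets the test namespace, so create it first if it does not exist yet. A minimal sketch using the file names above:
root@master01:/data/work/yaml/coredns# kubectl create namespace test
root@master01:/data/work/yaml/coredns# kubectl apply -f nginx.yaml
root@master01:/data/work/yaml/coredns# kubectl apply -f tomcat.yaml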
Exec into a container for a network test; name resolution works.
root@master01:~# kubectl exec -it tomcat-app1-deployment-6c86988859-5rj9g sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/usr/local/tomcat # ping nginx-service.test.svc.cluster.local
PING nginx-service.test.svc.cluster.local (10.100.82.138): 56 data bytes
64 bytes from 10.100.82.138: seq=0 ttl=64 time=0.161 ms
64 bytes from 10.100.82.138: seq=1 ttl=64 time=0.134 ms
64 bytes from 10.100.82.138: seq=2 ttl=64 time=0.148 ms
64 bytes from 10.100.82.138: seq=3 ttl=64 time=0.146 ms
/usr/local/tomcat # nslookup nginx-service.test.svc.cluster.local 10.100.0.2
Server: 10.100.0.2
Address 1: 10.100.0.2 kube-dns.kube-system.svc.cluster.local
Name: nginx-service.test.svc.cluster.local
Address 1: 10.100.82.138 nginx-service.test.svc.cluster.local
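In-cluster resolution works. To also confirm that the forward plugin passes non-cluster names on to the upstream resolvers in /etc/resolv.conf, a quick extra check from the same container could be (output omitted; the domain is just an arbitrary public name):
/usr/local/tomcat # nslookup www.baidu.com 10.100.0.2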
5. Troubleshooting when Pods cannot resolve names
1. Check that Pod-to-Pod networking works. Even if the DNS service itself is healthy, a domain-name ping from inside a Pod cannot resolve if the answer from the CoreDNS Pods never makes it back, so first rule out Pod-to-Pod connectivity failures caused by the CNI plugin (Calico, Flannel, etc.).
2. Check that the DNS server configured inside the Pod is the CoreDNS Service IP. The container in a Pod is a complete operating system in its own right, and a wrong DNS address will also break resolution. The nameserver 10.100.0.2 shown below is determined by the clusterDNS field in the kubelet's config.yaml.
root@master01:~# kubectl exec -it tomcat-app1-deployment-6c86988859-5rj9g sh
/usr/local/tomcat # cat /etc/resolv.conf
nameserver 10.100.0.2 #determined by the clusterDNS field in the kubelet's config.yaml
search default.svc.cluster.local svc.cluster.local cluster.local local
options ndots:5
/usr/local/tomcat #
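For reference, the kubelet settings behind that nameserver line look roughly like this (the path /var/lib/kubelet/config.yaml is a common default and is assumed here; adjust it to your own setup):
# excerpt from the kubelet config.yaml
clusterDNS:
- 10.100.0.2
clusterDomain: cluster.local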
3. As noted in item 2, the nameserver the Pod uses is a Service clusterIP; the real CoreDNS Pods sit behind that Service, so there is a layer of proxying and forwarding in the path. Knowing that traffic flow, there are two directions to check. First, whether the forwarding actually happens: verify that kube-proxy is still working, because the Service relies on the ipvs (or iptables) rules kube-proxy maintains, and without it there are no rules to forward the traffic. Second, whether the 10.100.0.2 Service actually selects the CoreDNS Pods: run kubectl get ep and check that the Service has Pod endpoints behind it.
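Two commands that may help with those checks; this is only a sketch and assumes kube-proxy runs in ipvs mode with ipvsadm installed on the node (in iptables mode, inspect the iptables rules instead):
root@master01:~# kubectl get ep kube-dns -n kube-system    #does the kube-dns Service have CoreDNS Pod endpoints?
root@master01:~# ipvsadm -Ln | grep -A3 10.100.0.2    #are there ipvs forwarding rules for the clusterIP?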