目 录CONTENT

文章目录

K8s基于资源定义创建haproxy的pod

ZiChen D
2021-12-22 / 0 评论 / 0 点赞 / 370 阅读 / 5,857 字 / 正在检测是否收录...

编写测试用web文件

测试用web镜像

[root@master httpd]# pwd
/root/httpd
[root@master httpd]# vim Dockerfile 
[root@master httpd]# cat Dockerfile 
# Minimal test web image, v1: busybox's built-in httpd serving a
# static index page out of /data.
FROM busybox

RUN mkdir  /data && \
    echo "test page on v1" > /data/index.html
# -f keeps httpd in the foreground (required for a container's PID 1);
# -h sets the document root.
ENTRYPOINT ["/bin/httpd","-f","-h","/data"]

[root@master httpd]# vim Dockerfile 
[root@master httpd]# cat Dockerfile 
# Minimal test web image, v2: identical to v1 except for the page
# content, so that load-balancing between the two is observable.
FROM busybox

RUN mkdir /data && \
    echo "test page on v2" > /data/index.html
# -f keeps httpd in the foreground (required for a container's PID 1);
# -h sets the document root.
ENTRYPOINT ["/bin/httpd","-f","-h","/data"]

# Build the two test images. A build context path is mandatory for
# `docker build`; '.' uses the current directory (which holds the
# Dockerfile, edited between the two builds per the transcript above).
docker build -t dengzichen/httpd:v1 .
docker build -t dengzichen/httpd:v2 .

[root@master httpd]# docker images
REPOSITORY                                                        TAG        IMAGE ID       CREATED         SIZE
dengzichen/httpd                                                  v1         37e49703bcb5   4 minutes ago   1.24MB
dengzichen/httpd                                                  v2         911a41ce1512   5 minutes ago   1.24MB

测试用web1 yaml文件编写

[root@master ~]# vim web-test1.yaml
---
# Deployment: one replica of the v1 test web server.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web1
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web1
  template:
    metadata:
      labels:
        app: web1
    spec:
      containers:
      - name: web1
        image: dengzichen/httpd:v1
        # Fixed typo: the original read "magePullPolicy", an unknown
        # field that the API server rejects.
        imagePullPolicy: IfNotPresent
---
# Service: NodePort exposure of the web1 pods on port 80.
apiVersion: v1
kind: Service
metadata:
  name: web1
  namespace: default
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: web1
  type: NodePort

测试用web2 yaml文件编写

[root@master ~]# vim web-test2.yaml
---
# Deployment: one replica of the v2 test web server.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web2
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web2
  template:
    metadata:
      labels:
        app: web2
    spec:
      containers:
      - name: httpd
        image: dengzichen/httpd:v2
        # Fixed typo: the original read "magePullPolicy", an unknown
        # field that the API server rejects.
        imagePullPolicy: IfNotPresent
---
# Service: NodePort exposure of the web2 pods on port 80.
apiVersion: v1
kind: Service
metadata:
  name: web2
  namespace: default
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: web2
  type: NodePort

基于文件创建pod

[root@master ~]# kubectl create -f web-test1.yaml 
deployment.apps/web1 created
service/web1 created

[root@master ~]# kubectl create -f web-test2.yaml 
deployment.apps/web2 created
service/web2 created

[root@master ~]# kubectl get pods
NAME                             READY   STATUS    RESTARTS   AGE
pod/web1-78cc34ddb-asg5v         1/1     Running   0          35s
pod/web2-d8cpaiq73-jhgd2         1/1     Running   0          26s

[root@master ~]# kubectl get svc
NAME                 TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
service/web1         NodePort    10.124.165.43    <none>        80:32001/TCP   35s
service/web2         NodePort    10.159.123.58    <none>        80:32002/TCP   26s

编写haproxy.yaml文件

[root@master ~]# cat haproxy.yaml 
---
# HAProxy pod that load-balances across the web1/web2 services
# (resolved by name via the RSIP environment variable).
apiVersion: v1
kind: Pod
metadata:
  name: haproxy
  namespace: default
  labels:
    app: haproxy
spec:
  restartPolicy: OnFailure
  # The original init container had no "image" field and mounted a
  # volume that was never declared under spec.volumes — both make the
  # Pod fail API validation. An emptyDir volume and a busybox image
  # are supplied so the manifest is valid.
  volumes:
  - name: data
    emptyDir: {}
  initContainers:
  - name: data
    image: busybox
    imagePullPolicy: IfNotPresent
    # Placeholder no-op; the original intent of this init step is not
    # visible in the article — TODO confirm what it should prepare.
    command: ["sh", "-c", "true"]
    volumeMounts:
    - name: data
      mountPath: /tmp
  containers:
  - image: dengzichen/haproxy:v2
    imagePullPolicy: IfNotPresent
    name: haproxy
    env:
      # Space-separated backend service names consumed by the image's
      # config-generation entrypoint.
      - name: RSIP
        value: "web1 web2"
    livenessProbe:
      tcpSocket:
        port: 80
      initialDelaySeconds: 20
      periodSeconds: 10
    readinessProbe:
      tcpSocket:
        port: 80
      initialDelaySeconds: 20
      periodSeconds: 10
---
# NodePort service exposing the haproxy pod on port 80.
apiVersion: v1
kind: Service
metadata:
  name: haproxy
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: haproxy
  type: NodePort

基于haproxy.yaml文件创建pod

[root@master ~]# kubectl create  -f haproxy.yaml 
pod/haproxy created
[root@master ~]# kubectl get pod -o wide
NAME                         READY   STATUS    RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
haproxy-d2khvu9c4-df8s9      1/1     Running   0          23s     10.244.1.68   node1   <none>           <none>
web1-78cc34ddb-asg5v         1/1     Running   0          3m18s   10.244.2.41   node2   <none>           <none>
web2-d8cpaiq73-jhgd2         1/1     Running   0          3m9s    10.244.2.42   node2   <none>           <none>

访问测试

[root@master ~]# curl 10.244.1.68
test page on v1
[root@master ~]# curl 10.244.1.68
test page on v2

查看配置文件

[root@master ~]# kubectl exec haproxy-d2khvu9c4-df8s9 -- cat /usr/local/haproxy/conf/haproxy.cfg
#-------------- Global configuration ----------------
global
    log 127.0.0.1 local0  info
    #log loghost local0 info
    maxconn 20480
#chroot /usr/local/haproxy
    pidfile /var/run/haproxy.pid
    #maxconn 4000
    user haproxy
    group haproxy
    daemon
#---------------------------------------------------------------------
#common defaults that all the 'listen' and 'backend' sections will
#use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode http
    log global
    option dontlognull
    option httpclose
    option httplog
    #option forwardfor
    option redispatch
    balance roundrobin
    timeout connect 10s
    timeout client 10s
    timeout server 10s
    timeout check 10s
    maxconn 60000
    retries 3
#-------------- Stats page configuration ------------------
listen admin_stats
    bind 0.0.0.0:8189
    stats enable
    mode http
    log global
    stats uri /haproxy_stats
    stats realm Haproxy\ Statistics
    stats auth admin:admin
    #stats hide-version
    stats admin if TRUE
    stats refresh 30s
#--------------- Web backend settings -----------------------
listen webcluster
    bind 0.0.0.0:80
    mode http
    #option httpchk GET /index.html
    log global
    maxconn 3000
    balance roundrobin
    cookie SESSION_COOKIE insert indirect nocache
    server web1 web1:80 check inter 2000 fall 5
    server web2 web2:80 check inter 2000 fall 5
0

评论区